
Merge commit 'origin/master' into next

Manual merge of:
	drivers/char/hvc_console.c
	drivers/char/hvc_console.h
Author: Benjamin Herrenschmidt
Commit: 874f2f997d
100 changed files with 2272 additions and 2484 deletions
  1. Documentation/dontdiff (+0, -1)
  2. Documentation/kernel-parameters.txt (+9, -0)
  3. Documentation/lguest/lguest.c (+0, -1)
  4. Documentation/networking/ip-sysctl.txt (+4, -4)
  5. MAINTAINERS (+13, -8)
  6. Makefile (+1, -1)
  7. arch/arm/include/asm/cacheflush.h (+2, -1)
  8. arch/arm/kernel/setup.c (+1, -0)
  9. arch/arm/mach-gemini/gpio.c (+2, -2)
  10. arch/arm/mach-omap2/mux.c (+5, -7)
  11. arch/arm/mm/alignment.c (+3, -0)
  12. arch/arm/tools/mach-types (+44, -2)
  13. arch/ia64/include/asm/acpi.h (+1, -0)
  14. arch/ia64/sn/kernel/setup.c (+1, -1)
  15. arch/microblaze/Kconfig (+1, -0)
  16. arch/microblaze/include/asm/io.h (+1, -1)
  17. arch/microblaze/include/asm/prom.h (+0, -20)
  18. arch/microblaze/kernel/cpu/cache.c (+8, -19)
  19. arch/microblaze/kernel/of_platform.c (+1, -1)
  20. arch/microblaze/kernel/prom.c (+13, -977)
  21. arch/mips/bcm47xx/prom.c (+8, -0)
  22. arch/mips/mm/highmem.c (+1, -0)
  23. arch/parisc/Kconfig (+0, -1)
  24. arch/parisc/kernel/pci.c (+5, -2)
  25. arch/powerpc/Kconfig (+1, -0)
  26. arch/powerpc/include/asm/prom.h (+0, -18)
  27. arch/powerpc/kernel/of_platform.c (+1, -1)
  28. arch/powerpc/kernel/pci_64.c (+1, -1)
  29. arch/powerpc/kernel/prom.c (+49, -838)
  30. arch/powerpc/platforms/85xx/mpc85xx_mds.c (+2, -1)
  31. arch/powerpc/platforms/85xx/xes_mpc85xx.c (+2, -2)
  32. arch/powerpc/platforms/cell/cbe_powerbutton.c (+1, -1)
  33. arch/powerpc/platforms/cell/ras.c (+1, -1)
  34. arch/powerpc/platforms/cell/spu_manage.c (+3, -3)
  35. arch/powerpc/platforms/pasemi/cpufreq.c (+2, -2)
  36. arch/powerpc/platforms/powermac/cpufreq_32.c (+7, -7)
  37. arch/powerpc/platforms/powermac/cpufreq_64.c (+7, -7)
  38. arch/powerpc/platforms/powermac/feature.c (+1, -1)
  39. arch/powerpc/platforms/powermac/pfunc_core.c (+1, -1)
  40. arch/powerpc/platforms/powermac/smp.c (+6, -6)
  41. arch/powerpc/platforms/powermac/time.c (+4, -4)
  42. arch/powerpc/platforms/powermac/udbg_scc.c (+3, -3)
  43. arch/powerpc/sysdev/grackle.c (+2, -2)
  44. arch/sparc/include/asm/stat.h (+2, -2)
  45. arch/sparc/kernel/devices.c (+1, -1)
  46. arch/sparc/kernel/kstack.h (+4, -0)
  47. arch/sparc/kernel/of_device_32.c (+2, -2)
  48. arch/sparc/kernel/of_device_64.c (+1, -1)
  49. arch/sparc/kernel/pci.c (+7, -0)
  50. arch/sparc/kernel/prom.h (+0, -3)
  51. arch/sparc/kernel/prom_common.c (+3, -15)
  52. arch/sparc/kernel/smp_64.c (+1, -1)
  53. arch/sparc/kernel/tsb.S (+4, -2)
  54. arch/x86/include/asm/processor.h (+2, -0)
  55. arch/x86/kernel/acpi/boot.c (+0, -8)
  56. arch/x86/kernel/hw_breakpoint.c (+7, -23)
  57. arch/x86/kernel/ptrace.c (+5, -2)
  58. block/blk-core.c (+2, -9)
  59. drivers/acpi/dock.c (+1, -0)
  60. drivers/acpi/processor_idle.c (+24, -12)
  61. drivers/acpi/processor_pdc.c (+14, -0)
  62. drivers/acpi/processor_perflib.c (+5, -1)
  63. drivers/acpi/scan.c (+22, -5)
  64. drivers/acpi/tables.c (+2, -2)
  65. drivers/ata/ahci.c (+10, -2)
  66. drivers/base/class.c (+2, -0)
  67. drivers/block/virtio_blk.c (+46, -15)
  68. drivers/char/Kconfig (+8, -0)
  69. drivers/char/hvc_beat.c (+1, -1)
  70. drivers/char/virtio_console.c (+1403, -145)
  71. drivers/clocksource/cs5535-clockevt.c (+1, -1)
  72. drivers/gpu/drm/drm_edid.c (+45, -2)
  73. drivers/gpu/drm/i915/i915_drv.c (+9, -21)
  74. drivers/gpu/drm/i915/intel_lvds.c (+7, -0)
  75. drivers/gpu/drm/nouveau/nouveau_bios.c (+3, -4)
  76. drivers/gpu/drm/nouveau/nouveau_bios.h (+1, -1)
  77. drivers/gpu/drm/nouveau/nouveau_drv.h (+1, -0)
  78. drivers/gpu/drm/nouveau/nouveau_mem.c (+72, -41)
  79. drivers/gpu/drm/nouveau/nv04_dac.c (+5, -1)
  80. drivers/gpu/drm/nouveau/nv17_tv.c (+2, -0)
  81. drivers/gpu/drm/nouveau/nv50_instmem.c (+40, -18)
  82. drivers/gpu/drm/radeon/atom.c (+1, -1)
  83. drivers/gpu/drm/radeon/r600_blit_kms.c (+0, -3)
  84. drivers/gpu/drm/radeon/r600_cp.c (+6, -3)
  85. drivers/gpu/drm/radeon/radeon.h (+5, -4)
  86. drivers/gpu/drm/radeon/radeon_atombios.c (+9, -0)
  87. drivers/gpu/drm/radeon/radeon_combios.c (+22, -22)
  88. drivers/gpu/drm/radeon/radeon_connectors.c (+2, -3)
  89. drivers/gpu/drm/radeon/radeon_cs.c (+4, -6)
  90. drivers/gpu/drm/radeon/radeon_drv.h (+2, -1)
  91. drivers/gpu/drm/radeon/radeon_object.c (+15, -21)
  92. drivers/gpu/drm/radeon/radeon_object.h (+2, -2)
  93. drivers/gpu/drm/radeon/radeon_ring.c (+42, -65)
  94. drivers/gpu/drm/radeon/rv770.c (+6, -3)
  95. drivers/gpu/drm/ttm/ttm_tt.c (+11, -7)
  96. drivers/gpu/drm/vmwgfx/vmwgfx_drv.c (+16, -33)
  97. drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c (+92, -16)
  98. drivers/gpu/drm/vmwgfx/vmwgfx_fb.c (+3, -0)
  99. drivers/gpu/vga/vgaarb.c (+1, -1)
  100. drivers/hid/Kconfig (+50, -4)

+ 0 - 1
Documentation/dontdiff

@@ -69,7 +69,6 @@ av_permissions.h
 bbootsect
 bin2c
 binkernel.spec
-binoffset
 bootsect
 bounds.h
 bsetup

+ 9 - 0
Documentation/kernel-parameters.txt

@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			acpi_display_output=video
 			See above.
 
+	acpi_early_pdc_eval	[HW,ACPI] Evaluate processor _PDC methods
+				early. Needed on some platforms to properly
+				initialize the EC.
+
 	acpi_irq_balance [HW,ACPI]
 			ACPI will balance active IRQs
 			default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
 	aic79xx=	[HW,SCSI]
 			See Documentation/scsi/aic79xx.txt.
 
+	alignment=	[KNL,ARM]
+			Allow the default userspace alignment fault handler
+			behaviour to be specified.  Bit 0 enables warnings,
+			bit 1 enables fixups, and bit 2 sends a segfault.
+
 	amd_iommu=	[HW,X86-84]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:

+ 0 - 1
Documentation/lguest/lguest.c

@@ -34,7 +34,6 @@
 #include <sys/uio.h>
 #include <termios.h>
 #include <getopt.h>
-#include <zlib.h>
 #include <assert.h>
 #include <sched.h>
 #include <limits.h>

+ 4 - 4
Documentation/networking/ip-sysctl.txt

@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
 	Default: 5
 
 max_addresses - INTEGER
-	Number of maximum addresses per interface.  0 disables limitation.
-	It is recommended not set too large value (or 0) because it would
-	be too easy way to crash kernel to allow to create too much of
-	autoconfigured addresses.
+	Maximum number of autoconfigured addresses per interface.  Setting
+	to zero disables the limitation.  It is not recommended to set this
+	value too large (or to zero) because it would be an easy way to
+	crash the kernel by allowing too many addresses to be created.
 	Default: 16
 
 disable_ipv6 - BOOLEAN

+ 13 - 8
MAINTAINERS

@@ -616,10 +616,10 @@ M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-gemini/mainline.git
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T:	topgit git://git.openezx.org/openezx.git
 F:	arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F:	include/linux/tfrc.h
 F:	net/dccp/
 
 DECnet NETWORK LAYER
-M:	Christine Caulfield <christine.caulfield@googlemail.com>
 W:	http://linux-decnet.sourceforge.net
 L:	linux-decnet-user@lists.sourceforge.net
-S:	Maintained
+S:	Orphan
 F:	Documentation/networking/decnet.txt
 F:	net/decnet/
 
@@ -2394,6 +2393,12 @@ L:	linuxppc-dev@ozlabs.org
 S:	Odd Fixes
 F:	drivers/char/hvc_*
 
+VIRTIO CONSOLE DRIVER
+M:	Amit Shah <amit.shah@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/char/virtio_console.c
+
 GSPCA FINEPIX SUBDRIVER
 M:	Frank Zago <frank@zago.net>
 L:	linux-media@vger.kernel.org
@@ -3490,9 +3495,9 @@ S:	Maintained
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M:	Lennert Buytenhek <buytenh@marvell.com>
+M:	Lennert Buytenhek <buytenh@wantstofly.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	drivers/net/mv643xx_eth.*
 F:	include/linux/mv643xx.h
 

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

+ 2 - 1
arch/arm/include/asm/cacheflush.h

@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 

+ 1 - 0
arch/arm/kernel/setup.c

@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {

+ 2 - 2
arch/arm/mach-gemini/gpio.c

@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	unsigned int reg_both, reg_level, reg_type;
 
 	reg_type = __raw_readl(base + GPIO_INT_TYPE);
-	reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+	reg_level = __raw_readl(base + GPIO_INT_LEVEL);
 	reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
 	switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	}
 
 	__raw_writel(reg_type, base + GPIO_INT_TYPE);
-	__raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+	__raw_writel(reg_level, base + GPIO_INT_LEVEL);
 	__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
 	gpio_ack_irq(irq);

+ 5 - 7
arch/arm/mach-omap2/mux.c

@@ -961,16 +961,14 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
 	while (superset->reg_offset !=  OMAP_MUX_TERMINATOR) {
 		struct omap_mux *entry;
 
-#ifndef CONFIG_OMAP_MUX
-		/* Skip pins that are not muxed as GPIO by bootloader */
-		if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+#ifdef CONFIG_OMAP_MUX
+		if (!superset->muxnames || !superset->muxnames[0]) {
 			superset++;
 			continue;
 		}
-#endif
-
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
-		if (!superset->muxnames || !superset->muxnames[0]) {
+#else
+		/* Skip pins that are not muxed as GPIO by bootloader */
+		if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
 			superset++;
 			continue;
 		}

+ 3 - 0
arch/arm/mm/alignment.c

@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/moduleparam.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+core_param(alignment, ai_usermode, int, 0600);
+
 #define UM_WARN		(1 << 0)
 #define UM_FIXUP	(1 << 1)
 #define UM_SIGNAL	(1 << 2)
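
Editor's note: the hunk above hooks the new "alignment=" boot parameter
(documented in the kernel-parameters.txt hunk earlier in this commit) up to
ai_usermode via core_param(), and the UM_* bits give the value its meaning.
The standalone sketch below is not part of the commit; it only illustrates
how a hypothetical setting such as alignment=3 decomposes into those bits.

    /* Editor's sketch, not from this commit: decode a hypothetical
     * "alignment=3" the same way the UM_* flags above are tested. */
    #include <stdio.h>

    #define UM_WARN   (1 << 0)   /* bit 0: warn about the fault  */
    #define UM_FIXUP  (1 << 1)   /* bit 1: fix the access up     */
    #define UM_SIGNAL (1 << 2)   /* bit 2: send a fault signal   */

    int main(void)
    {
        int ai_usermode = 3;     /* as if booted with "alignment=3" */

        printf("warn=%d fixup=%d signal=%d\n",
               !!(ai_usermode & UM_WARN),
               !!(ai_usermode & UM_FIXUP),
               !!(ai_usermode & UM_SIGNAL));
        return 0;
    }

With alignment=3, warnings and fixups are enabled and no fault signal is sent.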

+ 44 - 2
arch/arm/tools/mach-types

@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Thu Jan 28 22:15:54 2010
+# Last update: Sat Feb 20 14:16:15 2010
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -2257,7 +2257,7 @@ oratisalog		MACH_ORATISALOG		ORATISALOG		2268
 oratismadi		MACH_ORATISMADI		ORATISMADI		2269
 oratisot16		MACH_ORATISOT16		ORATISOT16		2270
 oratisdesk		MACH_ORATISDESK		ORATISDESK		2271
-v2_ca9			MACH_V2P_CA9		V2P_CA9			2272
+vexpress		MACH_VEXPRESS		VEXPRESS		2272
 sintexo			MACH_SINTEXO		SINTEXO			2273
 cm3389			MACH_CM3389		CM3389			2274
 omap3_cio		MACH_OMAP3_CIO		OMAP3_CIO		2275
@@ -2636,3 +2636,45 @@ hw90240			MACH_HW90240		HW90240			2648
 dm365_leopard		MACH_DM365_LEOPARD	DM365_LEOPARD		2649
 mityomapl138		MACH_MITYOMAPL138	MITYOMAPL138		2650
 scat110			MACH_SCAT110		SCAT110			2651
+acer_a1			MACH_ACER_A1		ACER_A1			2652
+cmcontrol		MACH_CMCONTROL		CMCONTROL		2653
+pelco_lamar		MACH_PELCO_LAMAR	PELCO_LAMAR		2654
+rfp43			MACH_RFP43		RFP43			2655
+sk86r0301		MACH_SK86R0301		SK86R0301		2656
+ctpxa			MACH_CTPXA		CTPXA			2657
+epb_arm9_a		MACH_EPB_ARM9_A		EPB_ARM9_A		2658
+guruplug		MACH_GURUPLUG		GURUPLUG		2659
+spear310		MACH_SPEAR310		SPEAR310		2660
+spear320		MACH_SPEAR320		SPEAR320		2661
+robotx			MACH_ROBOTX		ROBOTX			2662
+lsxhl			MACH_LSXHL		LSXHL			2663
+smartlite		MACH_SMARTLITE		SMARTLITE		2664
+cws2			MACH_CWS2		CWS2			2665
+m619			MACH_M619		M619			2666
+smartview		MACH_SMARTVIEW		SMARTVIEW		2667
+lsa_salsa		MACH_LSA_SALSA		LSA_SALSA		2668
+kizbox			MACH_KIZBOX		KIZBOX			2669
+htccharmer		MACH_HTCCHARMER		HTCCHARMER		2670
+guf_neso_lt		MACH_GUF_NESO_LT	GUF_NESO_LT		2671
+pm9g45			MACH_PM9G45		PM9G45			2672
+htcpanther		MACH_HTCPANTHER		HTCPANTHER		2673
+htcpanther_cdma		MACH_HTCPANTHER_CDMA	HTCPANTHER_CDMA		2674
+reb01			MACH_REB01		REB01			2675
+aquila			MACH_AQUILA		AQUILA			2676
+spark_sls_hw2		MACH_SPARK_SLS_HW2	SPARK_SLS_HW2		2677
+sheeva_esata		MACH_ESATA_SHEEVAPLUG	ESATA_SHEEVAPLUG	2678
+surf7x30		MACH_SURF7X30		SURF7X30		2679
+micro2440		MACH_MICRO2440		MICRO2440		2680
+am2440			MACH_AM2440		AM2440			2681
+tq2440			MACH_TQ2440		TQ2440			2682
+lpc2478oem		MACH_LPC2478OEM		LPC2478OEM		2683
+ak880x			MACH_AK880X		AK880X			2684
+cobra3530		MACH_COBRA3530		COBRA3530		2685
+pmppb			MACH_PMPPB		PMPPB			2686
+u6715			MACH_U6715		U6715			2687
+axar1500_sender		MACH_AXAR1500_SENDER	AXAR1500_SENDER		2688
+g30_dvb			MACH_G30_DVB		G30_DVB			2689
+vc088x			MACH_VC088X		VC088X			2690
+mioa702			MACH_MIOA702		MIOA702			2691
+hpmin			MACH_HPMIN		HPMIN			2692
+ak880xak		MACH_AK880XAK		AK880XAK		2693

+ 1 - 0
arch/ia64/include/asm/acpi.h

@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0	/* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }

+ 1 - 1
arch/ia64/sn/kernel/setup.c

@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);

+ 1 - 0
arch/microblaze/Kconfig

@@ -130,6 +130,7 @@ config CMDLINE_FORCE
 
 config OF
 	def_bool y
+	select OF_FLATTREE
 
 config PROC_DEVICETREE
 	bool "Support for device tree in /proc"

+ 1 - 1
arch/microblaze/include/asm/io.h

@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */
 
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
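
Editor's note: the one-character change above drops a stray trailing
semicolon from the out_le32() macro.  With the semicolon the macro expands
to two statements, so "if (cond) out_le32(a, v); else ..." becomes
"if (cond) ...;; else ..." and fails to compile ("else without a previous
if").  The standalone sketch below is not from this commit and uses
invented names; it just demonstrates the safe single-statement form.

    /* Editor's sketch, not from this commit: a single-statement macro
     * (no trailing ';') is safe inside if/else; the commented-out
     * variant with the extra ';' would break the else branch below. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_reg;                 /* stand-in for an MMIO register */
    #define raw_write(a, v)  (*(a) = (v))

    #define out_le32(a, v)   raw_write(a, v)  /* fixed: no trailing ';' */
    /* #define out_le32(a, v)  raw_write(a, v);   broken: extra ';' */

    int main(void)
    {
        int ok = 1;

        if (ok)
            out_le32(&fake_reg, 0x12345678u);
        else
            puts("not written");

        printf("reg = 0x%x\n", (unsigned int)fake_reg);
        return 0;
    }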

+ 0 - 20
arch/microblaze/include/asm/prom.h

@@ -26,31 +26,11 @@
 #include <asm/irq.h>
 #include <asm/atomic.h>
 
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
-
-#define of_compat_cmp(s1, s2, l)	strncasecmp((s1), (s2), (l))
-#define of_prop_cmp(s1, s2)		strcmp((s1), (s2))
-#define of_node_cmp(s1, s2)		strcasecmp((s1), (s2))
-
-extern struct device_node *of_chosen;
-
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-extern struct device_node *allnodes;	/* temporary while merging */
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(struct device_node *);
-
 /* Other Prototypes */
 extern int early_uartlite_console(void);
 
-extern struct resource *request_OF_resource(struct device_node *node,
-				int index, const char *name_postfix);
-extern int release_OF_resource(struct device_node *node, int index);
-
 /*
  * OF address retreival & translation
  */

+ 8 - 19
arch/microblaze/kernel/cpu/cache.c

@@ -172,16 +172,15 @@ do {									\
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
 do {									\
-	int step = -line_length;					\
-	int count = end - start;					\
-	BUG_ON(count <= 0);						\
+	int volatile temp;						\
+	BUG_ON(end - start <= 0);					\
 									\
-	__asm__ __volatile__ (" 1:	addk	%0, %0, %1;		\
-					" #op " %0, r0;			\
-					bgtid   %1, 1b;			\
-					addk    %1, %1, %2;		\
-					" : : "r" (start), "r" (count),	\
-					"r" (step) : "memory");		\
+	__asm__ __volatile__ (" 1:	" #op " %1, r0;			\
+					cmpu	%0, %1, %2;		\
+					bgtid	%0, 1b;			\
+					addk	%1, %1, %3;		\
+				" : : "r" (temp), "r" (start), "r" (end),\
+					"r" (line_length) : "memory");	\
 } while (0);
 
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
 	pr_debug("%s\n", __func__);
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 					wdc.clear)
-
-#if 0
-	unsigned int i;
-
-	pr_debug("%s\n", __func__);
-
-	/* Just loop through cache size and invalidate it */
-	for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
-			__invalidate_dcache(0, i);
-#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,

+ 1 - 1
arch/microblaze/kernel/of_platform.c

@@ -185,7 +185,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
 static int of_dev_phandle_match(struct device *dev, void *data)
 {
 	phandle *ph = data;
-	return to_of_device(dev)->node->linux_phandle == *ph;
+	return to_of_device(dev)->node->phandle == *ph;
 }
 
 struct of_device *of_find_device_by_phandle(phandle ph)

+ 13 - 977
arch/microblaze/kernel/prom.c

@@ -42,698 +42,21 @@
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
 
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-
-typedef u32 cell_t;
-
-static struct boot_param_header *initial_boot_params;
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-static inline char *find_flat_dt_string(u32 offset)
-{
-	return ((char *)initial_boot_params) +
-		initial_boot_params->off_dt_strings + offset;
-}
-
-/**
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory informations at boot before we can
- * unflatten the tree
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-				     const char *uname, int depth,
-				     void *data),
-			   void *data)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	int rc = 0;
-	int depth = -1;
-
-	do {
-		u32 tag = *((u32 *)p);
-		char *pathp;
-
-		p += 4;
-		if (tag == OF_DT_END_NODE) {
-			depth--;
-			continue;
-		}
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag == OF_DT_END)
-			break;
-		if (tag == OF_DT_PROP) {
-			u32 sz = *((u32 *)p);
-			p += 8;
-			if (initial_boot_params->version < 0x10)
-				p = _ALIGN(p, sz >= 8 ? 8 : 4);
-			p += sz;
-			p = _ALIGN(p, 4);
-			continue;
-		}
-		if (tag != OF_DT_BEGIN_NODE) {
-			printk(KERN_WARNING "Invalid tag %x scanning flattened"
-				" device tree !\n", tag);
-			return -EINVAL;
-		}
-		depth++;
-		pathp = (char *)p;
-		p = _ALIGN(p + strlen(pathp) + 1, 4);
-		if ((*pathp) == '/') {
-			char *lp, *np;
-			for (lp = NULL, np = pathp; *np; np++)
-				if ((*np) == '/')
-					lp = np+1;
-			if (lp != NULL)
-				pathp = lp;
-		}
-		rc = it(p, pathp, depth, data);
-		if (rc != 0)
-			break;
-	} while (1);
-
-	return rc;
-}
-
-unsigned long __init of_get_flat_dt_root(void)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-
-	while (*((u32 *)p) == OF_DT_NOP)
-		p += 4;
-	BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
-	p += 4;
-	return _ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * This function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
-				unsigned long *size)
-{
-	unsigned long p = node;
-
-	do {
-		u32 tag = *((u32 *)p);
-		u32 sz, noff;
-		const char *nstr;
-
-		p += 4;
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag != OF_DT_PROP)
-			return NULL;
-
-		sz = *((u32 *)p);
-		noff = *((u32 *)(p + 4));
-		p += 8;
-		if (initial_boot_params->version < 0x10)
-			p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
-		nstr = find_flat_dt_string(noff);
-		if (nstr == NULL) {
-			printk(KERN_WARNING "Can't find property index"
-				" name !\n");
-			return NULL;
-		}
-		if (strcmp(name, nstr) == 0) {
-			if (size)
-				*size = sz;
-			return (void *)p;
-		}
-		p += sz;
-		p = _ALIGN(p, 4);
-	} while (1);
-}
-
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
-{
-	const char *cp;
-	unsigned long cplen, l;
-
-	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
-	if (cp == NULL)
-		return 0;
-	while (cplen > 0) {
-		if (strncasecmp(cp, compat, strlen(compat)) == 0)
-			return 1;
-		l = strlen(cp) + 1;
-		cp += l;
-		cplen -= l;
-	}
-
-	return 0;
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
-					unsigned long align)
-{
-	void *res;
-
-	*mem = _ALIGN(*mem, align);
-	res = (void *)*mem;
-	*mem += size;
-
-	return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
-					unsigned long *p,
-					struct device_node *dad,
-					struct device_node ***allnextpp,
-					unsigned long fpsize)
-{
-	struct device_node *np;
-	struct property *pp, **prev_pp = NULL;
-	char *pathp;
-	u32 tag;
-	unsigned int l, allocl;
-	int has_name = 0;
-	int new_format = 0;
-
-	tag = *((u32 *)(*p));
-	if (tag != OF_DT_BEGIN_NODE) {
-		printk("Weird tag at start of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	pathp = (char *)*p;
-	l = allocl = strlen(pathp) + 1;
-	*p = _ALIGN(*p + l, 4);
-
-	/* version 0x10 has a more compact unit name here instead of the full
-	 * path. we accumulate the full path size using "fpsize", we'll rebuild
-	 * it later. We detect this because the first character of the name is
-	 * not '/'.
-	 */
-	if ((*pathp) != '/') {
-		new_format = 1;
-		if (fpsize == 0) {
-			/* root node: special case. fpsize accounts for path
-			 * plus terminating zero. root node only has '/', so
-			 * fpsize should be 2, but we want to avoid the first
-			 * level nodes to have two '/' so we use fpsize 1 here
-			 */
-			fpsize = 1;
-			allocl = 2;
-		} else {
-			/* account for '/' and path size minus terminal 0
-			 * already in 'l'
-			 */
-			fpsize += l;
-			allocl = fpsize;
-		}
-	}
-
-	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
-				__alignof__(struct device_node));
-	if (allnextpp) {
-		memset(np, 0, sizeof(*np));
-		np->full_name = ((char *)np) + sizeof(struct device_node);
-		if (new_format) {
-			char *p2 = np->full_name;
-			/* rebuild full path for new format */
-			if (dad && dad->parent) {
-				strcpy(p2, dad->full_name);
-#ifdef DEBUG
-				if ((strlen(p2) + l + 1) != allocl) {
-					pr_debug("%s: p: %d, l: %d, a: %d\n",
-						pathp, (int)strlen(p2),
-						l, allocl);
-				}
-#endif
-				p2 += strlen(p2);
-			}
-			*(p2++) = '/';
-			memcpy(p2, pathp, l);
-		} else
-			memcpy(np->full_name, pathp, l);
-		prev_pp = &np->properties;
-		**allnextpp = np;
-		*allnextpp = &np->allnext;
-		if (dad != NULL) {
-			np->parent = dad;
-			/* we temporarily use the next field as `last_child'*/
-			if (dad->next == NULL)
-				dad->child = np;
-			else
-				dad->next->sibling = np;
-			dad->next = np;
-		}
-		kref_init(&np->kref);
-	}
-	while (1) {
-		u32 sz, noff;
-		char *pname;
-
-		tag = *((u32 *)(*p));
-		if (tag == OF_DT_NOP) {
-			*p += 4;
-			continue;
-		}
-		if (tag != OF_DT_PROP)
-			break;
-		*p += 4;
-		sz = *((u32 *)(*p));
-		noff = *((u32 *)((*p) + 4));
-		*p += 8;
-		if (initial_boot_params->version < 0x10)
-			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
-		pname = find_flat_dt_string(noff);
-		if (pname == NULL) {
-			printk(KERN_INFO
-				"Can't find property name in list !\n");
-			break;
-		}
-		if (strcmp(pname, "name") == 0)
-			has_name = 1;
-		l = strlen(pname) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
-					__alignof__(struct property));
-		if (allnextpp) {
-			if (strcmp(pname, "linux,phandle") == 0) {
-				np->node = *((u32 *)*p);
-				if (np->linux_phandle == 0)
-					np->linux_phandle = np->node;
-			}
-			if (strcmp(pname, "ibm,phandle") == 0)
-				np->linux_phandle = *((u32 *)*p);
-			pp->name = pname;
-			pp->length = sz;
-			pp->value = (void *)*p;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-		}
-		*p = _ALIGN((*p) + sz, 4);
-	}
-	/* with version 0x10 we may not have the name property, recreate
-	 * it here from the unit name if absent
-	 */
-	if (!has_name) {
-		char *p1 = pathp, *ps = pathp, *pa = NULL;
-		int sz;
-
-		while (*p1) {
-			if ((*p1) == '@')
-				pa = p1;
-			if ((*p1) == '/')
-				ps = p1 + 1;
-			p1++;
-		}
-		if (pa < ps)
-			pa = p1;
-		sz = (pa - ps) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
-					__alignof__(struct property));
-		if (allnextpp) {
-			pp->name = "name";
-			pp->length = sz;
-			pp->value = pp + 1;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-			memcpy(pp->value, ps, sz - 1);
-			((char *)pp->value)[sz - 1] = 0;
-			pr_debug("fixed up name for %s -> %s\n", pathp,
-				(char *)pp->value);
-		}
-	}
-	if (allnextpp) {
-		*prev_pp = NULL;
-		np->name = of_get_property(np, "name", NULL);
-		np->type = of_get_property(np, "device_type", NULL);
-
-		if (!np->name)
-			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
-	}
-	while (tag == OF_DT_BEGIN_NODE) {
-		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
-		tag = *((u32 *)(*p));
-	}
-	if (tag != OF_DT_END_NODE) {
-		printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	return mem;
-}
-
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
-	unsigned long start, mem, size;
-	struct device_node **allnextp = &allnodes;
-
-	pr_debug(" -> unflatten_device_tree()\n");
-
-	/* First pass, scan for size */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
-
-	pr_debug("  size is %lx, allocating...\n", size);
-
-	/* Allocate memory for the expanded device tree */
-	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	mem = (unsigned long) __va(mem);
-
-	((u32 *)mem)[size / 4] = 0xdeadbeef;
-
-	pr_debug("  unflattening %lx...\n", mem);
-
-	/* Second pass, do actual unflattening */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-	if (*((u32 *)start) != OF_DT_END)
-		printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
-			*((u32 *)start));
-	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
-		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
-			((u32 *)mem)[size / 4]);
-	*allnextp = NULL;
-
-	/* Get pointer to OF "/chosen" node for use everywhere */
-	of_chosen = of_find_node_by_path("/chosen");
-	if (of_chosen == NULL)
-		of_chosen = of_find_node_by_path("/chosen@0");
-
-	pr_debug(" <- unflatten_device_tree()\n");
-}
-
-#define early_init_dt_scan_drconf_memory(node) 0
-
-static int __init early_init_dt_scan_cpus(unsigned long node,
-					  const char *uname, int depth,
-					  void *data)
-{
-	static int logical_cpuid;
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	const u32 *intserv;
-	int i, nthreads;
-	int found = 0;
-
-	/* We are scanning "cpu" nodes only */
-	if (type == NULL || strcmp(type, "cpu") != 0)
-		return 0;
-
-	/* Get physical cpuid */
-	intserv = of_get_flat_dt_prop(node, "reg", NULL);
-	nthreads = 1;
-
-	/*
-	 * Now see if any of these threads match our boot cpu.
-	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
-	 */
-	for (i = 0; i < nthreads; i++) {
-		/*
-		 * version 2 of the kexec param format adds the phys cpuid of
-		 * booted proc.
-		 */
-		if (initial_boot_params && initial_boot_params->version >= 2) {
-			if (intserv[i] ==
-					initial_boot_params->boot_cpuid_phys) {
-				found = 1;
-				break;
-			}
-		} else {
-			/*
-			 * Check if it's the boot-cpu, set it's hw index now,
-			 * unfortunately this format did not support booting
-			 * off secondary threads.
-			 */
-			if (of_get_flat_dt_prop(node,
-					"linux,boot-cpu", NULL) != NULL) {
-				found = 1;
-				break;
-			}
-		}
-
-#ifdef CONFIG_SMP
-		/* logical cpu id is always 0 on UP kernels */
-		logical_cpuid++;
-#endif
-	}
-
-	if (found) {
-		pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
-			intserv[i]);
-		boot_cpuid = logical_cpuid;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init early_init_dt_check_for_initrd(unsigned long node)
-{
-	unsigned long l;
-	u32 *prop;
-
-	pr_debug("Looking for initrd properties... ");
-
-	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
-	if (prop) {
-		initrd_start = (unsigned long)
-					__va((u32)of_read_ulong(prop, l/4));
-
-		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
-		if (prop) {
-			initrd_end = (unsigned long)
-					__va((u32)of_read_ulong(prop, 1/4));
-			initrd_below_start_ok = 1;
-		} else {
-			initrd_start = 0;
-		}
-	}
-
-	pr_debug("initrd_start=0x%lx  initrd_end=0x%lx\n",
-					initrd_start, initrd_end);
-}
-#else
-static inline void early_init_dt_check_for_initrd(unsigned long node)
-{
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
-				const char *uname, int depth, void *data)
-{
-	unsigned long l;
-	char *p;
-
-	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
-	if (depth != 1 ||
-		(strcmp(uname, "chosen") != 0 &&
-				strcmp(uname, "chosen@0") != 0))
-		return 0;
-
-#ifdef CONFIG_KEXEC
-	lprop = (u64 *)of_get_flat_dt_prop(node,
-				"linux,crashkernel-base", NULL);
-	if (lprop)
-		crashk_res.start = *lprop;
-
-	lprop = (u64 *)of_get_flat_dt_prop(node,
-				"linux,crashkernel-size", NULL);
-	if (lprop)
-		crashk_res.end = crashk_res.start + *lprop - 1;
-#endif
-
-	early_init_dt_check_for_initrd(node);
-
-	/* Retreive command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
-
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
-#endif
-		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
-	pr_debug("Command line is: %s\n", cmd_line);
-
-	/* break now */
-	return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
-				const char *uname, int depth, void *data)
-{
-	u32 *prop;
-
-	if (depth != 0)
-		return 0;
-
-	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
-	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
-	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
-
-	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
-	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
-	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-
-	/* break now */
-	return 1;
-}
-
-static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
 {
-	cell_t *p = *cellp;
-
-	*cellp = p + s;
-	return of_read_number(p, s);
+	/* No Microblaze specific code here */
 }
 
-static int __init early_init_dt_scan_memory(unsigned long node,
-				const char *uname, int depth, void *data)
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	cell_t *reg, *endp;
-	unsigned long l;
-
-	/* Look for the ibm,dynamic-reconfiguration-memory node */
-/*	if (depth == 1 &&
-		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
-		return early_init_dt_scan_drconf_memory(node);
-*/
-	/* We are scanning "memory" nodes only */
-	if (type == NULL) {
-		/*
-		 * The longtrail doesn't have a device_type on the
-		 * /memory node, so look for the node called /memory@0.
-		 */
-		if (depth != 1 || strcmp(uname, "memory@0") != 0)
-			return 0;
-	} else if (strcmp(type, "memory") != 0)
-		return 0;
-
-	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
-	if (reg == NULL)
-		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
-	if (reg == NULL)
-		return 0;
-
-	endp = reg + (l / sizeof(cell_t));
-
-	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
-		uname, l, reg[0], reg[1], reg[2], reg[3]);
-
-	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
-		u64 base, size;
-
-		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
-		size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
-		if (size == 0)
-			continue;
-		pr_debug(" - %llx ,  %llx\n", (unsigned long long)base,
-			(unsigned long long)size);
-
-		lmb_add(base, size);
-	}
-	return 0;
+	lmb_add(base, size);
 }
 
-#ifdef CONFIG_PHYP_DUMP
-/**
- * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
- *
- * Function to find the largest size we need to reserve
- * during early boot process.
- *
- * It either looks for boot param and returns that OR
- * returns larger of 256 or 5% rounded down to multiples of 256MB.
- *
- */
-static inline unsigned long phyp_dump_calculate_reserve_size(void)
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	unsigned long tmp;
-
-	if (phyp_dump_info->reserve_bootvar)
-		return phyp_dump_info->reserve_bootvar;
-
-	/* divide by 20 to get 5% of value */
-	tmp = lmb_end_of_DRAM();
-	do_div(tmp, 20);
-
-	/* round it down in multiples of 256 */
-	tmp = tmp & ~0x0FFFFFFFUL;
-
-	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
+	return lmb_alloc(size, align);
 }
 
-/**
- * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
- *
- * This routine may reserve memory regions in the kernel only
- * if the system is supported and a dump was taken in last
- * boot instance or if the hardware is supported and the
- * scratch area needs to be setup. In other instances it returns
- * without reserving anything. The memory in case of dump being
- * active is freed when the dump is collected (by userland tools).
- */
-static void __init phyp_dump_reserve_mem(void)
-{
-	unsigned long base, size;
-	unsigned long variable_reserve_size;
-
-	if (!phyp_dump_info->phyp_dump_configured) {
-		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
-		return;
-	}
-
-	if (!phyp_dump_info->phyp_dump_at_boot) {
-		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
-		return;
-	}
-
-	variable_reserve_size = phyp_dump_calculate_reserve_size();
-
-	if (phyp_dump_info->phyp_dump_is_active) {
-		/* Reserve *everything* above RMR.Area freed by userland tools*/
-		base = variable_reserve_size;
-		size = lmb_end_of_DRAM() - base;
-
-		/* XXX crashed_ram_end is wrong, since it may be beyond
-		 * the memory_limit, it will need to be adjusted. */
-		lmb_reserve(base, size);
-
-		phyp_dump_info->init_reserve_start = base;
-		phyp_dump_info->init_reserve_size = size;
-	} else {
-		size = phyp_dump_info->cpu_state_size +
-			phyp_dump_info->hpte_region_size +
-			variable_reserve_size;
-		base = lmb_end_of_DRAM() - size;
-		lmb_reserve(base, size);
-		phyp_dump_info->init_reserve_start = base;
-		phyp_dump_info->init_reserve_size = size;
-	}
-}
-#else
-static inline void __init phyp_dump_reserve_mem(void) {}
-#endif /* CONFIG_PHYP_DUMP  && CONFIG_PPC_RTAS */
-
 #ifdef CONFIG_EARLY_PRINTK
 /* MS this is Microblaze specifig function */
 static int __init early_init_dt_scan_serial(unsigned long node,
@@ -775,11 +98,6 @@ void __init early_init_devtree(void *params)
 	/* Setup flat device-tree pointer */
 	initial_boot_params = params;
 
-#ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
-	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
-#endif
-
 	/* Retrieve various informations from the /chosen node of the
 	 * device-tree, including the platform type, initrd location and
 	 * size, TCE reserve, and more ...
@@ -799,33 +117,18 @@ void __init early_init_devtree(void *params)
 
 	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
 
-	pr_debug("Scanning CPUs ...\n");
-
-	/* Retreive CPU related informations from the flat tree
-	 * (altivec support, boot CPU ID, ...)
-	 */
-	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
-
 	pr_debug(" <- early_init_devtree()\n");
 }
 
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int machine_is_compatible(const char *compat)
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+		unsigned long end)
 {
-	struct device_node *root;
-	int rc = 0;
-
-	root = of_find_node_by_path("/");
-	if (root) {
-		rc = of_device_is_compatible(root, compat);
-		of_node_put(root);
-	}
-	return rc;
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
 }
-EXPORT_SYMBOL(machine_is_compatible);
+#endif
 
 /*******
  *
@@ -838,273 +141,6 @@ EXPORT_SYMBOL(machine_is_compatible);
  *
  *******/
 
-/**
- *	of_find_node_by_phandle - Find a node given a phandle
- *	@handle:	phandle of the node to find
- *
- *	Returns a node pointer with refcount incremented, use
- *	of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	read_lock(&devtree_lock);
-	for (np = allnodes; np != NULL; np = np->allnext)
-		if (np->linux_phandle == handle)
-			break;
-	of_node_get(np);
-	read_unlock(&devtree_lock);
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-/**
- *	of_node_get - Increment refcount of a node
- *	@node:	Node to inc refcount, NULL is supported to
- *		simplify writing of callers
- *
- *	Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
-	if (node)
-		kref_get(&node->kref);
-	return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node *kref_to_device_node(struct kref *kref)
-{
-	return container_of(kref, struct device_node, kref);
-}
-
-/**
- *	of_node_release - release a dynamically allocated node
- *	@kref:  kref element of the node to be released
- *
- *	In of_node_put() this function is passed to kref_put()
- *	as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
-	struct device_node *node = kref_to_device_node(kref);
-	struct property *prop = node->properties;
-
-	/* We should never be releasing nodes that haven't been detached. */
-	if (!of_node_check_flag(node, OF_DETACHED)) {
-		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
-			node->full_name);
-		dump_stack();
-		kref_init(&node->kref);
-		return;
-	}
-
-	if (!of_node_check_flag(node, OF_DYNAMIC))
-		return;
-
-	while (prop) {
-		struct property *next = prop->next;
-		kfree(prop->name);
-		kfree(prop->value);
-		kfree(prop);
-		prop = next;
-
-		if (!prop) {
-			prop = node->deadprops;
-			node->deadprops = NULL;
-		}
-	}
-	kfree(node->full_name);
-	kfree(node->data);
-	kfree(node);
-}
-
-/**
- *	of_node_put - Decrement refcount of a node
- *	@node:	Node to dec refcount, NULL is supported to
- *		simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
-	if (node)
-		kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	np->sibling = np->parent->child;
-	np->allnext = allnodes;
-	np->parent->child = np;
-	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * "Unplug" a node from the device tree.  The caller must hold
- * a reference to the node.  The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(struct device_node *np)
-{
-	struct device_node *parent;
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-
-	parent = np->parent;
-	if (!parent)
-		goto out_unlock;
-
-	if (allnodes == np)
-		allnodes = np->allnext;
-	else {
-		struct device_node *prev;
-		for (prev = allnodes;
-		     prev->allnext != np;
-		     prev = prev->allnext)
-			;
-		prev->allnext = np->allnext;
-	}
-
-	if (parent->child == np)
-		parent->child = np->sibling;
-	else {
-		struct device_node *prevsib;
-		for (prevsib = np->parent->child;
-		     prevsib->sibling != np;
-		     prevsib = prevsib->sibling)
-			;
-		prevsib->sibling = np->sibling;
-	}
-
-	of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * Add a property to a node
- */
-int prom_add_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-
-	prop->next = NULL;
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (strcmp(prop->name, (*next)->name) == 0) {
-			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
-			return -1;
-		}
-		next = &(*next)->next;
-	}
-	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Remove a property from a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, so it won't be found any more.
- */
-int prom_remove_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == prop) {
-			/* found the node */
-			*next = prop->next;
-			prop->next = np->deadprops;
-			np->deadprops = prop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to remove the proc node as well */
-	if (np->pde)
-		proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Update a property in a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, and add the new property to the
- * property list
- */
-int prom_update_property(struct device_node *np,
-			 struct property *newprop,
-			 struct property *oldprop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == oldprop) {
-			/* found the node */
-			newprop->next = oldprop->next;
-			*next = newprop;
-			oldprop->next = np->deadprops;
-			np->deadprops = oldprop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
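
Editor's note: the bulk of the deletions above remove this architecture's
private copy of the flattened-device-tree parser; the shared support
selected via OF_FLATTREE in the arch/microblaze/Kconfig hunk earlier now
provides the parsing and calls back into the architecture through the small
hooks kept in this file (early_init_dt_add_memory_arch(),
early_init_dt_alloc_memory_arch(), early_init_dt_setup_initrd_arch()).
The standalone mock below only illustrates that callback shape; the generic
caller is invented for the example and is not the real common code.

    /* Editor's sketch, not from this commit: generic FDT code hands each
     * parsed memory range to an arch hook, which maps it onto the
     * architecture's own allocator (lmb_add() on microblaze). */
    #include <stdio.h>
    #include <stdint.h>

    /* arch-provided hook (mocked): register one memory region */
    static void early_init_dt_add_memory_arch(uint64_t base, uint64_t size)
    {
        printf("memory: base=0x%llx size=0x%llx\n",
               (unsigned long long)base, (unsigned long long)size);
    }

    /* invented stand-in for the common scanner that now owns the parsing */
    static void scan_memory_nodes_mock(void)
    {
        early_init_dt_add_memory_arch(0x00000000ULL, 0x08000000ULL); /* 128 MiB */
    }

    int main(void)
    {
        scan_memory_nodes_mock();
        return 0;
    }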
 

+ 8 - 0
arch/mips/bcm47xx/prom.c

@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
 			break;
 	}
 
+	/* Ignoring the last page when ddr size is 128M. Cached
+	 * accesses to last page is causing the processor to prefetch
+	 * using address above 128M stepping out of the ddr address
+	 * space.
+	 */
+	if (mem == 0x8000000)
+		mem -= 0x1000;
+
 	add_memory_region(0, mem, BOOT_MEM_RAM);
 }
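
Editor's note (not part of the commit): checking the constants in the hunk
above, 0x8000000 bytes = 128 MiB and 0x1000 bytes = 4 KiB (one page), so
when a full 128 MiB of DDR is detected the registered region becomes
0x8000000 - 0x1000 = 0x7fff000 bytes, i.e. it stops one page short of the
128 MiB boundary whose prefetch causes the problem described in the comment.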
 

+ 1 - 0
arch/mips/mm/highmem.c

@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>

+ 0 - 1
arch/parisc/Kconfig

@@ -18,7 +18,6 @@ config PARISC
 	select BUG
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select HAVE_ARCH_TRACEHOOK
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,

+ 5 - 2
arch/parisc/kernel/pci.c

@@ -18,7 +18,6 @@
 
 #include <asm/io.h>
 #include <asm/system.h>
-#include <asm/cache.h>		/* for L1_CACHE_BYTES */
 #include <asm/superio.h>
 
 #define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
 	} else {
 		printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
 	}
+
+	/* Set the CLS for PCI as early as possible. */
+	pci_cache_line_size = pci_dfl_cache_line_size;
+
 	return 0;
 }
 
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
 	** upper byte is PCI_LATENCY_TIMER.
 	*/
 	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
-				(0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+			      (0x80 << 8) | pci_cache_line_size);
 }
 
 

+ 1 - 0
arch/powerpc/Kconfig

@@ -173,6 +173,7 @@ config PPC_OF
 
 config OF
 	def_bool y
+	select OF_FLATTREE
 
 config PPC_UDBG_16550
 	bool

+ 0 - 18
arch/powerpc/include/asm/prom.h

@@ -23,21 +23,8 @@
 #include <asm/irq.h>
 #include <asm/atomic.h>
 
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
-
-#define of_compat_cmp(s1, s2, l)	strcasecmp((s1), (s2))
-#define of_prop_cmp(s1, s2)		strcmp((s1), (s2))
-#define of_node_cmp(s1, s2)		strcasecmp((s1), (s2))
-
-extern struct device_node *of_chosen;
-
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(struct device_node *);
-
 #ifdef CONFIG_PPC32
 /*
  * PCI <-> OF matching functions
@@ -52,11 +39,6 @@ extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
 extern void pci_create_OF_bus_map(void);
 #endif
 
-extern struct resource *request_OF_resource(struct device_node* node,
-				int index, const char* name_postfix);
-extern int release_OF_resource(struct device_node* node, int index);
-
-
 /*
  * OF address retreival & translation
  */

+ 1 - 1
arch/powerpc/kernel/of_platform.c

@@ -214,7 +214,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
 static int of_dev_phandle_match(struct device *dev, void *data)
 {
 	phandle *ph = data;
-	return to_of_device(dev)->node->linux_phandle == *ph;
+	return to_of_device(dev)->node->phandle == *ph;
 }
 
 struct of_device *of_find_device_by_phandle(phandle ph)

+ 1 - 1
arch/powerpc/kernel/pci_64.c

@@ -224,7 +224,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
 	 * G5 machines... So when something asks for bus 0 io base
 	 * (bus 0 is HT root), we return the AGP one instead.
 	 */
-	if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
+	if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
 		struct device_node *agp;
 
 		agp = of_find_compatible_node(NULL, NULL, "u3-agp");

+ 49 - 838
arch/powerpc/kernel/prom.c

@@ -61,365 +61,12 @@
 #define DBG(fmt...)
 #endif
 
-
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-
 #ifdef CONFIG_PPC64
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
 #endif
 
-typedef u32 cell_t;
-
-#if 0
-static struct boot_param_header *initial_boot_params __initdata;
-#else
-struct boot_param_header *initial_boot_params;
-#endif
-
-extern struct device_node *allnodes;	/* temporary while merging */
-
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-static inline char *find_flat_dt_string(u32 offset)
-{
-	return ((char *)initial_boot_params) +
-		initial_boot_params->off_dt_strings + offset;
-}
-
-/**
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory informations at boot before we can
- * unflatten the tree
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-				     const char *uname, int depth,
-				     void *data),
-			   void *data)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	int rc = 0;
-	int depth = -1;
-
-	do {
-		u32 tag = *((u32 *)p);
-		char *pathp;
-		
-		p += 4;
-		if (tag == OF_DT_END_NODE) {
-			depth --;
-			continue;
-		}
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag == OF_DT_END)
-			break;
-		if (tag == OF_DT_PROP) {
-			u32 sz = *((u32 *)p);
-			p += 8;
-			if (initial_boot_params->version < 0x10)
-				p = _ALIGN(p, sz >= 8 ? 8 : 4);
-			p += sz;
-			p = _ALIGN(p, 4);
-			continue;
-		}
-		if (tag != OF_DT_BEGIN_NODE) {
-			printk(KERN_WARNING "Invalid tag %x scanning flattened"
-			       " device tree !\n", tag);
-			return -EINVAL;
-		}
-		depth++;
-		pathp = (char *)p;
-		p = _ALIGN(p + strlen(pathp) + 1, 4);
-		if ((*pathp) == '/') {
-			char *lp, *np;
-			for (lp = NULL, np = pathp; *np; np++)
-				if ((*np) == '/')
-					lp = np+1;
-			if (lp != NULL)
-				pathp = lp;
-		}
-		rc = it(p, pathp, depth, data);
-		if (rc != 0)
-			break;		
-	} while(1);
-
-	return rc;
-}
-
-unsigned long __init of_get_flat_dt_root(void)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-
-	while(*((u32 *)p) == OF_DT_NOP)
-		p += 4;
-	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
-	p += 4;
-	return _ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * This  function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
-				 unsigned long *size)
-{
-	unsigned long p = node;
-
-	do {
-		u32 tag = *((u32 *)p);
-		u32 sz, noff;
-		const char *nstr;
-
-		p += 4;
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag != OF_DT_PROP)
-			return NULL;
-
-		sz = *((u32 *)p);
-		noff = *((u32 *)(p + 4));
-		p += 8;
-		if (initial_boot_params->version < 0x10)
-			p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
-		nstr = find_flat_dt_string(noff);
-		if (nstr == NULL) {
-			printk(KERN_WARNING "Can't find property index"
-			       " name !\n");
-			return NULL;
-		}
-		if (strcmp(name, nstr) == 0) {
-			if (size)
-				*size = sz;
-			return (void *)p;
-		}
-		p += sz;
-		p = _ALIGN(p, 4);
-	} while(1);
-}
-
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
-{
-	const char* cp;
-	unsigned long cplen, l;
-
-	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
-	if (cp == NULL)
-		return 0;
-	while (cplen > 0) {
-		if (strncasecmp(cp, compat, strlen(compat)) == 0)
-			return 1;
-		l = strlen(cp) + 1;
-		cp += l;
-		cplen -= l;
-	}
-
-	return 0;
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
-				       unsigned long align)
-{
-	void *res;
-
-	*mem = _ALIGN(*mem, align);
-	res = (void *)*mem;
-	*mem += size;
-
-	return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
-					      unsigned long *p,
-					      struct device_node *dad,
-					      struct device_node ***allnextpp,
-					      unsigned long fpsize)
-{
-	struct device_node *np;
-	struct property *pp, **prev_pp = NULL;
-	char *pathp;
-	u32 tag;
-	unsigned int l, allocl;
-	int has_name = 0;
-	int new_format = 0;
-
-	tag = *((u32 *)(*p));
-	if (tag != OF_DT_BEGIN_NODE) {
-		printk("Weird tag at start of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	pathp = (char *)*p;
-	l = allocl = strlen(pathp) + 1;
-	*p = _ALIGN(*p + l, 4);
-
-	/* version 0x10 has a more compact unit name here instead of the full
-	 * path. we accumulate the full path size using "fpsize", we'll rebuild
-	 * it later. We detect this because the first character of the name is
-	 * not '/'.
-	 */
-	if ((*pathp) != '/') {
-		new_format = 1;
-		if (fpsize == 0) {
-			/* root node: special case. fpsize accounts for path
-			 * plus terminating zero. root node only has '/', so
-			 * fpsize should be 2, but we want to avoid the first
-			 * level nodes to have two '/' so we use fpsize 1 here
-			 */
-			fpsize = 1;
-			allocl = 2;
-		} else {
-			/* account for '/' and path size minus terminal 0
-			 * already in 'l'
-			 */
-			fpsize += l;
-			allocl = fpsize;
-		}
-	}
-
-
-	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
-				__alignof__(struct device_node));
-	if (allnextpp) {
-		memset(np, 0, sizeof(*np));
-		np->full_name = ((char*)np) + sizeof(struct device_node);
-		if (new_format) {
-			char *p = np->full_name;
-			/* rebuild full path for new format */
-			if (dad && dad->parent) {
-				strcpy(p, dad->full_name);
-#ifdef DEBUG
-				if ((strlen(p) + l + 1) != allocl) {
-					DBG("%s: p: %d, l: %d, a: %d\n",
-					    pathp, (int)strlen(p), l, allocl);
-				}
-#endif
-				p += strlen(p);
-			}
-			*(p++) = '/';
-			memcpy(p, pathp, l);
-		} else
-			memcpy(np->full_name, pathp, l);
-		prev_pp = &np->properties;
-		**allnextpp = np;
-		*allnextpp = &np->allnext;
-		if (dad != NULL) {
-			np->parent = dad;
-			/* we temporarily use the next field as `last_child'*/
-			if (dad->next == 0)
-				dad->child = np;
-			else
-				dad->next->sibling = np;
-			dad->next = np;
-		}
-		kref_init(&np->kref);
-	}
-	while(1) {
-		u32 sz, noff;
-		char *pname;
-
-		tag = *((u32 *)(*p));
-		if (tag == OF_DT_NOP) {
-			*p += 4;
-			continue;
-		}
-		if (tag != OF_DT_PROP)
-			break;
-		*p += 4;
-		sz = *((u32 *)(*p));
-		noff = *((u32 *)((*p) + 4));
-		*p += 8;
-		if (initial_boot_params->version < 0x10)
-			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
-		pname = find_flat_dt_string(noff);
-		if (pname == NULL) {
-			printk("Can't find property name in list !\n");
-			break;
-		}
-		if (strcmp(pname, "name") == 0)
-			has_name = 1;
-		l = strlen(pname) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
-					__alignof__(struct property));
-		if (allnextpp) {
-			if (strcmp(pname, "linux,phandle") == 0) {
-				np->node = *((u32 *)*p);
-				if (np->linux_phandle == 0)
-					np->linux_phandle = np->node;
-			}
-			if (strcmp(pname, "ibm,phandle") == 0)
-				np->linux_phandle = *((u32 *)*p);
-			pp->name = pname;
-			pp->length = sz;
-			pp->value = (void *)*p;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-		}
-		*p = _ALIGN((*p) + sz, 4);
-	}
-	/* with version 0x10 we may not have the name property, recreate
-	 * it here from the unit name if absent
-	 */
-	if (!has_name) {
-		char *p = pathp, *ps = pathp, *pa = NULL;
-		int sz;
-
-		while (*p) {
-			if ((*p) == '@')
-				pa = p;
-			if ((*p) == '/')
-				ps = p + 1;
-			p++;
-		}
-		if (pa < ps)
-			pa = p;
-		sz = (pa - ps) + 1;
-		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
-					__alignof__(struct property));
-		if (allnextpp) {
-			pp->name = "name";
-			pp->length = sz;
-			pp->value = pp + 1;
-			*prev_pp = pp;
-			prev_pp = &pp->next;
-			memcpy(pp->value, ps, sz - 1);
-			((char *)pp->value)[sz - 1] = 0;
-			DBG("fixed up name for %s -> %s\n", pathp,
-				(char *)pp->value);
-		}
-	}
-	if (allnextpp) {
-		*prev_pp = NULL;
-		np->name = of_get_property(np, "name", NULL);
-		np->type = of_get_property(np, "device_type", NULL);
-
-		if (!np->name)
-			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
-	}
-	while (tag == OF_DT_BEGIN_NODE) {
-		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
-		tag = *((u32 *)(*p));
-	}
-	if (tag != OF_DT_END_NODE) {
-		printk("Weird tag at end of node: %x\n", tag);
-		return mem;
-	}
-	*p += 4;
-	return mem;
-}
-
 static int __init early_parse_mem(char *p)
 {
 	if (!p)
@@ -446,7 +93,7 @@ static void __init move_device_tree(void)
 	DBG("-> move_device_tree\n");
 
 	start = __pa(initial_boot_params);
-	size = initial_boot_params->totalsize;
+	size = be32_to_cpu(initial_boot_params->totalsize);
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
@@ -459,54 +106,6 @@ static void __init move_device_tree(void)
 	DBG("<- move_device_tree\n");
 }
 
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
-	unsigned long start, mem, size;
-	struct device_node **allnextp = &allnodes;
-
-	DBG(" -> unflatten_device_tree()\n");
-
-	/* First pass, scan for size */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
-
-	DBG("  size is %lx, allocating...\n", size);
-
-	/* Allocate memory for the expanded device tree */
-	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	mem = (unsigned long) __va(mem);
-
-	((u32 *)mem)[size / 4] = 0xdeadbeef;
-
-	DBG("  unflattening %lx...\n", mem);
-
-	/* Second pass, do actual unflattening */
-	start = ((unsigned long)initial_boot_params) +
-		initial_boot_params->off_dt_struct;
-	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-	if (*((u32 *)start) != OF_DT_END)
-		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
-	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
-		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
-		       ((u32 *)mem)[size / 4] );
-	*allnextp = NULL;
-
-	/* Get pointer to OF "/chosen" node for use everywhere */
-	of_chosen = of_find_node_by_path("/chosen");
-	if (of_chosen == NULL)
-		of_chosen = of_find_node_by_path("/chosen@0");
-
-	DBG(" <- unflatten_device_tree()\n");
-}
-
 /*
  * ibm,pa-features is a per-cpu property that contains a string of
  * attribute descriptors, each of which has a 2 byte header plus up
@@ -763,48 +362,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	return 0;
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init early_init_dt_check_for_initrd(unsigned long node)
-{
-	unsigned long l;
-	u32 *prop;
-
-	DBG("Looking for initrd properties... ");
-
-	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
-	if (prop) {
-		initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));
-
-		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
-		if (prop) {
-			initrd_end = (unsigned long)
-					__va(of_read_ulong(prop, l/4));
-			initrd_below_start_ok = 1;
-		} else {
-			initrd_start = 0;
-		}
-	}
-
-	DBG("initrd_start=0x%lx  initrd_end=0x%lx\n", initrd_start, initrd_end);
-}
-#else
-static inline void early_init_dt_check_for_initrd(unsigned long node)
-{
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
-					    const char *uname, int depth, void *data)
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
 {
 	unsigned long *lprop;
-	unsigned long l;
-	char *p;
-
-	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
-	if (depth != 1 ||
-	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
-		return 0;
 
 #ifdef CONFIG_PPC64
 	/* check if iommu is forced on or off */
@@ -815,17 +375,17 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 #endif
 
 	/* mem=x on the command line is the preferred mechanism */
- 	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
- 	if (lprop)
- 		memory_limit = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
+	if (lprop)
+		memory_limit = *lprop;
 
 #ifdef CONFIG_PPC64
- 	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
- 	if (lprop)
- 		tce_alloc_start = *lprop;
- 	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
- 	if (lprop)
- 		tce_alloc_end = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+	if (lprop)
+		tce_alloc_start = *lprop;
+	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+	if (lprop)
+		tce_alloc_end = *lprop;
 #endif
 
 #ifdef CONFIG_KEXEC
@@ -837,51 +397,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 	if (lprop)
 		crashk_res.end = crashk_res.start + *lprop - 1;
 #endif
-
-	early_init_dt_check_for_initrd(node);
-
-	/* Retreive command line */
- 	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
-
-#ifdef CONFIG_CMDLINE
-	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
-		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
-	DBG("Command line is: %s\n", cmd_line);
-
-	/* break now */
-	return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
-					  const char *uname, int depth, void *data)
-{
-	u32 *prop;
-
-	if (depth != 0)
-		return 0;
-
-	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
-	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
-	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
-
-	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
-	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
-	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-	
-	/* break now */
-	return 1;
-}
-
-static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
-{
-	cell_t *p = *cellp;
-
-	*cellp = p + s;
-	return of_read_number(p, s);
 }
 
 #ifdef CONFIG_PPC_PSERIES
@@ -893,22 +408,22 @@ static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
  */
 static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
-	cell_t *dm, *ls, *usm;
+	__be32 *dm, *ls, *usm;
 	unsigned long l, n, flags;
 	u64 base, size, lmb_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
 	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
-	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
 	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
 	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
-	if (dm == NULL || l < sizeof(cell_t))
+	if (dm == NULL || l < sizeof(__be32))
 		return 0;
 
 	n = *dm++;	/* number of entries */
-	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
 		return 0;
 
 	/* check if this is a kexec/kdump kernel. */
@@ -963,65 +478,47 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 #define early_init_dt_scan_drconf_memory(node)	0
 #endif /* CONFIG_PPC_PSERIES */
 
-static int __init early_init_dt_scan_memory(unsigned long node,
-					    const char *uname, int depth, void *data)
+static int __init early_init_dt_scan_memory_ppc(unsigned long node,
+						const char *uname,
+						int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	cell_t *reg, *endp;
-	unsigned long l;
-
-	/* Look for the ibm,dynamic-reconfiguration-memory node */
 	if (depth == 1 &&
 	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
 		return early_init_dt_scan_drconf_memory(node);
+	
+	return early_init_dt_scan_memory(node, uname, depth, data);
+}
 
-	/* We are scanning "memory" nodes only */
-	if (type == NULL) {
-		/*
-		 * The longtrail doesn't have a device_type on the
-		 * /memory node, so look for the node called /memory@0.
-		 */
-		if (depth != 1 || strcmp(uname, "memory@0") != 0)
-			return 0;
-	} else if (strcmp(type, "memory") != 0)
-		return 0;
-
-	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
-	if (reg == NULL)
-		reg = of_get_flat_dt_prop(node, "reg", &l);
-	if (reg == NULL)
-		return 0;
-
-	endp = reg + (l / sizeof(cell_t));
-
-	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
-	    uname, l, reg[0], reg[1], reg[2], reg[3]);
-
-	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
-		u64 base, size;
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+#if defined(CONFIG_PPC64)
+	if (iommu_is_off) {
+		if (base >= 0x80000000ul)
+			return;
+		if ((base + size) > 0x80000000ul)
+			size = 0x80000000ul - base;
+	}
+#endif
 
-		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
-		size = dt_mem_next_cell(dt_root_size_cells, &reg);
+	lmb_add(base, size);
 
-		if (size == 0)
-			continue;
-		DBG(" - %llx ,  %llx\n", (unsigned long long)base,
-		    (unsigned long long)size);
-#ifdef CONFIG_PPC64
-		if (iommu_is_off) {
-			if (base >= 0x80000000ul)
-				continue;
-			if ((base + size) > 0x80000000ul)
-				size = 0x80000000ul - base;
-		}
-#endif
-		lmb_add(base, size);
+	memstart_addr = min((u64)memstart_addr, base);
+}
 
-		memstart_addr = min((u64)memstart_addr, base);
-	}
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return lmb_alloc(size, align);
+}
 
-	return 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+		unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
 }
+#endif
 
 static void __init early_reserve_mem(void)
 {
@@ -1186,7 +683,7 @@ void __init early_init_devtree(void *params)
 	/* Scan memory nodes and rebuild LMBs */
 	lmb_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
-	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
 	/* Save command line for /proc/cmdline and then parse parameters */
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
@@ -1234,25 +731,6 @@ void __init early_init_devtree(void *params)
 	DBG(" <- early_init_devtree()\n");
 }
 
-
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int machine_is_compatible(const char *compat)
-{
-	struct device_node *root;
-	int rc = 0;
-
-	root = of_find_node_by_path("/");
-	if (root) {
-		rc = of_device_is_compatible(root, compat);
-		of_node_put(root);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(machine_is_compatible);
-
 /*******
  *
  * New implementation of the OF "find" APIs, return a refcounted
@@ -1264,27 +742,6 @@ EXPORT_SYMBOL(machine_is_compatible);
  *
  *******/
 
-/**
- *	of_find_node_by_phandle - Find a node given a phandle
- *	@handle:	phandle of the node to find
- *
- *	Returns a node pointer with refcount incremented, use
- *	of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	read_lock(&devtree_lock);
-	for (np = allnodes; np != 0; np = np->allnext)
-		if (np->linux_phandle == handle)
-			break;
-	of_node_get(np);
-	read_unlock(&devtree_lock);
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
 /**
  *	of_find_next_cache_node - Find a node's subsidiary cache
  *	@np:	node of type "cpu" or "cache"
@@ -1316,138 +773,6 @@ struct device_node *of_find_next_cache_node(struct device_node *np)
 	return NULL;
 }
 
-/**
- *	of_node_get - Increment refcount of a node
- *	@node:	Node to inc refcount, NULL is supported to
- *		simplify writing of callers
- *
- *	Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
-	if (node)
-		kref_get(&node->kref);
-	return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node * kref_to_device_node(struct kref *kref)
-{
-	return container_of(kref, struct device_node, kref);
-}
-
-/**
- *	of_node_release - release a dynamically allocated node
- *	@kref:  kref element of the node to be released
- *
- *	In of_node_put() this function is passed to kref_put()
- *	as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
-	struct device_node *node = kref_to_device_node(kref);
-	struct property *prop = node->properties;
-
-	/* We should never be releasing nodes that haven't been detached. */
-	if (!of_node_check_flag(node, OF_DETACHED)) {
-		printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
-		dump_stack();
-		kref_init(&node->kref);
-		return;
-	}
-
-	if (!of_node_check_flag(node, OF_DYNAMIC))
-		return;
-
-	while (prop) {
-		struct property *next = prop->next;
-		kfree(prop->name);
-		kfree(prop->value);
-		kfree(prop);
-		prop = next;
-
-		if (!prop) {
-			prop = node->deadprops;
-			node->deadprops = NULL;
-		}
-	}
-	kfree(node->full_name);
-	kfree(node->data);
-	kfree(node);
-}
-
-/**
- *	of_node_put - Decrement refcount of a node
- *	@node:	Node to dec refcount, NULL is supported to
- *		simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
-	if (node)
-		kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	np->sibling = np->parent->child;
-	np->allnext = allnodes;
-	np->parent->child = np;
-	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * "Unplug" a node from the device tree.  The caller must hold
- * a reference to the node.  The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(struct device_node *np)
-{
-	struct device_node *parent;
-	unsigned long flags;
-
-	write_lock_irqsave(&devtree_lock, flags);
-
-	parent = np->parent;
-	if (!parent)
-		goto out_unlock;
-
-	if (allnodes == np)
-		allnodes = np->allnext;
-	else {
-		struct device_node *prev;
-		for (prev = allnodes;
-		     prev->allnext != np;
-		     prev = prev->allnext)
-			;
-		prev->allnext = np->allnext;
-	}
-
-	if (parent->child == np)
-		parent->child = np->sibling;
-	else {
-		struct device_node *prevsib;
-		for (prevsib = np->parent->child;
-		     prevsib->sibling != np;
-		     prevsib = prevsib->sibling)
-			;
-		prevsib->sibling = np->sibling;
-	}
-
-	of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
-}
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Fix up the uninitialized fields in a new device node:
@@ -1479,9 +804,9 @@ static int of_finish_dynamic_node(struct device_node *node)
 	if (machine_is(powermac))
 		return -ENODEV;
 
-	/* fix up new node's linux_phandle field */
+	/* fix up new node's phandle field */
 	if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
-		node->linux_phandle = *ibm_phandle;
+		node->phandle = *ibm_phandle;
 
 out:
 	of_node_put(parent);
@@ -1520,120 +845,6 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/*
- * Add a property to a node
- */
-int prom_add_property(struct device_node* np, struct property* prop)
-{
-	struct property **next;
-	unsigned long flags;
-
-	prop->next = NULL;	
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (strcmp(prop->name, (*next)->name) == 0) {
-			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
-			return -1;
-		}
-		next = &(*next)->next;
-	}
-	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Remove a property from a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, so it won't be found any more.
- */
-int prom_remove_property(struct device_node *np, struct property *prop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == prop) {
-			/* found the node */
-			*next = prop->next;
-			prop->next = np->deadprops;
-			np->deadprops = prop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to remove the proc node as well */
-	if (np->pde)
-		proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-/*
- * Update a property in a node.  Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property.  Instead we just move the property
- * to the "dead properties" list, and add the new property to the
- * property list
- */
-int prom_update_property(struct device_node *np,
-			 struct property *newprop,
-			 struct property *oldprop)
-{
-	struct property **next;
-	unsigned long flags;
-	int found = 0;
-
-	write_lock_irqsave(&devtree_lock, flags);
-	next = &np->properties;
-	while (*next) {
-		if (*next == oldprop) {
-			/* found the node */
-			newprop->next = oldprop->next;
-			*next = newprop;
-			oldprop->next = np->deadprops;
-			np->deadprops = oldprop;
-			found = 1;
-			break;
-		}
-		next = &(*next)->next;
-	}
-	write_unlock_irqrestore(&devtree_lock, flags);
-
-	if (!found)
-		return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
-	/* try to add to proc as well if it was initialized */
-	if (np->pde)
-		proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
-	return 0;
-}
-
-
 /* Find the device node for a given logical cpu number, also returns the cpu
  * local thread number (index in ibm,interrupt-server#s) if relevant and
  * asked for (non NULL)
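Note on the prom.c hunks above: everything removed here — flattened-tree unflattening, the of_node refcount helpers, phandle lookup, the prom_*_property helpers and machine_is_compatible() — is being consolidated into the generic device-tree code, leaving this file with powerpc-specific hooks such as early_init_dt_scan_chosen_arch() and early_init_dt_add_memory_arch(). Callers patched later in this commit switch to of_machine_is_compatible(), which is presumably the removed helper under its generic name. A minimal sketch of that replacement, assuming the generic implementation simply mirrors the removed powerpc body (names of the other of_* calls are unchanged from the code above):

    #include <linux/module.h>
    #include <linux/of.h>

    /* Sketch only: assumes the generic version keeps the removed
     * powerpc logic unchanged apart from the rename. */
    int of_machine_is_compatible(const char *compat)
    {
    	struct device_node *root;
    	int rc = 0;

    	root = of_find_node_by_path("/");
    	if (root) {
    		rc = of_device_is_compatible(root, compat);
    		of_node_put(root);
    	}
    	return rc;
    }
    EXPORT_SYMBOL(of_machine_is_compatible);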

+ 2 - 1
arch/powerpc/platforms/85xx/mpc85xx_mds.c

@@ -341,7 +341,8 @@ static void __init mpc85xx_mds_pic_init(void)
 	}
 
 	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+			MPIC_BROKEN_FRR_NIRQS,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 	of_node_put(np);

+ 2 - 2
arch/powerpc/platforms/85xx/xes_mpc85xx.c

@@ -80,8 +80,8 @@ static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
 	printk(KERN_INFO "xes_mpc85xx: Enabling L2 as cache\n");
 
 	ctl = MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2I;
-	if (machine_is_compatible("MPC8540") ||
-	    machine_is_compatible("MPC8560"))
+	if (of_machine_is_compatible("MPC8540") ||
+	    of_machine_is_compatible("MPC8560"))
 		/*
 		 * Assume L2 SRAM is used fully for cache, so set
 		 * L2BLKSZ (bits 4:5) to match L2SIZ (bits 2:3).

+ 1 - 1
arch/powerpc/platforms/cell/cbe_powerbutton.c

@@ -48,7 +48,7 @@ static int __init cbe_powerbutton_init(void)
 	int ret = 0;
 	struct input_dev *dev;
 
-	if (!machine_is_compatible("IBM,CBPLUS-1.0")) {
+	if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) {
 		printk(KERN_ERR "%s: Not a cell blade.\n", __func__);
 		ret = -ENODEV;
 		goto out;

+ 1 - 1
arch/powerpc/platforms/cell/ras.c

@@ -255,7 +255,7 @@ static int __init cbe_sysreset_init(void)
 {
 	struct cbe_pmd_regs __iomem *regs;
 
-	sysreset_hack = machine_is_compatible("IBM,CBPLUS-1.0");
+	sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0");
 	if (!sysreset_hack)
 		return 0;
 

+ 3 - 3
arch/powerpc/platforms/cell/spu_manage.c

@@ -457,7 +457,7 @@ neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
 			continue;
 		vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
 		for (i=0; i < (lenp / sizeof(phandle)); i++) {
-			if (vic_handles[i] == target->linux_phandle)
+			if (vic_handles[i] == target->phandle)
 				return spu;
 		}
 	}
@@ -499,7 +499,7 @@ static void init_affinity_node(int cbe)
 
 			if (strcmp(name, "spe") == 0) {
 				spu = devnode_spu(cbe, vic_dn);
-				avoid_ph = last_spu_dn->linux_phandle;
+				avoid_ph = last_spu_dn->phandle;
 			} else {
 				/*
 				 * "mic-tm" and "bif0" nodes do not have
@@ -514,7 +514,7 @@ static void init_affinity_node(int cbe)
 					last_spu->has_mem_affinity = 1;
 					spu->has_mem_affinity = 1;
 				}
-				avoid_ph = vic_dn->linux_phandle;
+				avoid_ph = vic_dn->phandle;
 			}
 
 			list_add_tail(&spu->aff_list, &last_spu->aff_list);

+ 2 - 2
arch/powerpc/platforms/pasemi/cpufreq.c

@@ -304,8 +304,8 @@ static struct cpufreq_driver pas_cpufreq_driver = {
 
 static int __init pas_cpufreq_init(void)
 {
-	if (!machine_is_compatible("PA6T-1682M") &&
-	    !machine_is_compatible("pasemi,pwrficient"))
+	if (!of_machine_is_compatible("PA6T-1682M") &&
+	    !of_machine_is_compatible("pasemi,pwrficient"))
 		return -ENODEV;
 
 	return cpufreq_register_driver(&pas_cpufreq_driver);

+ 7 - 7
arch/powerpc/platforms/powermac/cpufreq_32.c

@@ -657,31 +657,31 @@ static int __init pmac_cpufreq_setup(void)
 	cur_freq = (*value) / 1000;
 
 	/*  Check for 7447A based MacRISC3 */
-	if (machine_is_compatible("MacRISC3") &&
+	if (of_machine_is_compatible("MacRISC3") &&
 	    of_get_property(cpunode, "dynamic-power-step", NULL) &&
 	    PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
 		pmac_cpufreq_init_7447A(cpunode);
 	/* Check for other MacRISC3 machines */
-	} else if (machine_is_compatible("PowerBook3,4") ||
-		   machine_is_compatible("PowerBook3,5") ||
-		   machine_is_compatible("MacRISC3")) {
+	} else if (of_machine_is_compatible("PowerBook3,4") ||
+		   of_machine_is_compatible("PowerBook3,5") ||
+		   of_machine_is_compatible("MacRISC3")) {
 		pmac_cpufreq_init_MacRISC3(cpunode);
 	/* Else check for iBook2 500/600 */
-	} else if (machine_is_compatible("PowerBook4,1")) {
+	} else if (of_machine_is_compatible("PowerBook4,1")) {
 		hi_freq = cur_freq;
 		low_freq = 400000;
 		set_speed_proc = pmu_set_cpu_speed;
 		is_pmu_based = 1;
 	}
 	/* Else check for TiPb 550 */
-	else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+	else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
 		hi_freq = cur_freq;
 		low_freq = 500000;
 		set_speed_proc = pmu_set_cpu_speed;
 		is_pmu_based = 1;
 	}
 	/* Else check for TiPb 400 & 500 */
-	else if (machine_is_compatible("PowerBook3,2")) {
+	else if (of_machine_is_compatible("PowerBook3,2")) {
 		/* We only know about the 400 MHz and the 500Mhz model
 		 * they both have 300 MHz as low frequency
 		 */

+ 7 - 7
arch/powerpc/platforms/powermac/cpufreq_64.c

@@ -398,11 +398,11 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
 	int rc = -ENODEV;
 
 	/* Check supported platforms */
-	if (machine_is_compatible("PowerMac8,1") ||
-	    machine_is_compatible("PowerMac8,2") ||
-	    machine_is_compatible("PowerMac9,1"))
+	if (of_machine_is_compatible("PowerMac8,1") ||
+	    of_machine_is_compatible("PowerMac8,2") ||
+	    of_machine_is_compatible("PowerMac9,1"))
 		use_volts_smu = 1;
-	else if (machine_is_compatible("PowerMac11,2"))
+	else if (of_machine_is_compatible("PowerMac11,2"))
 		use_volts_vdnap = 1;
 	else
 		return -ENODEV;
@@ -729,9 +729,9 @@ static int __init g5_cpufreq_init(void)
 		return -ENODEV;
 	}
 
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		rc = g5_pm72_cpufreq_init(cpus);
 #ifdef CONFIG_PMAC_SMU
 	else

+ 1 - 1
arch/powerpc/platforms/powermac/feature.c

@@ -2426,7 +2426,7 @@ static int __init probe_motherboard(void)
 	    }
 	}
 	for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) {
-	    if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
+	    if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) {
 		pmac_mb = pmac_mb_defs[i];
 		goto found;
 	    }

+ 1 - 1
arch/powerpc/platforms/powermac/pfunc_core.c

@@ -842,7 +842,7 @@ struct pmf_function *__pmf_find_function(struct device_node *target,
 	list_for_each_entry(func, &dev->functions, link) {
 		if (name && strcmp(name, func->name))
 			continue;
-		if (func->phandle && target->node != func->phandle)
+		if (func->phandle && target->phandle != func->phandle)
 			continue;
 		if ((func->flags & flags) == 0)
 			continue;

+ 6 - 6
arch/powerpc/platforms/powermac/smp.c

@@ -693,9 +693,9 @@ static void __init smp_core99_setup(int ncpus)
 #ifdef CONFIG_PPC64
 
 	/* i2c based HW sync on some G5s */
-	if (machine_is_compatible("PowerMac7,2") ||
-	    machine_is_compatible("PowerMac7,3") ||
-	    machine_is_compatible("RackMac3,1"))
+	if (of_machine_is_compatible("PowerMac7,2") ||
+	    of_machine_is_compatible("PowerMac7,3") ||
+	    of_machine_is_compatible("RackMac3,1"))
 		smp_core99_setup_i2c_hwsync(ncpus);
 
 	/* pfunc based HW sync on recent G5s */
@@ -713,7 +713,7 @@ static void __init smp_core99_setup(int ncpus)
 #else /* CONFIG_PPC64 */
 
 	/* GPIO based HW sync on ppc32 Core99 */
-	if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) {
+	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
 		struct device_node *cpu;
 		const u32 *tbprop = NULL;
 
@@ -750,7 +750,7 @@ static void __init smp_core99_setup(int ncpus)
 #endif
 
 	/* 32 bits SMP can't NAP */
-	if (!machine_is_compatible("MacRISC4"))
+	if (!of_machine_is_compatible("MacRISC4"))
 		powersave_nap = 0;
 }
 
@@ -852,7 +852,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 		/* If we didn't start the second CPU, we must take
 		 * it off the bus
 		 */
-		if (machine_is_compatible("MacRISC4") &&
+		if (of_machine_is_compatible("MacRISC4") &&
 		    num_online_cpus() < 2)		
 			g5_phy_disable_cpu1();
 #endif /* CONFIG_PPC64 */

+ 4 - 4
arch/powerpc/platforms/powermac/time.c

@@ -317,9 +317,9 @@ void __init pmac_calibrate_decr(void)
 	 * calibration. That's better since the VIA itself seems
 	 * to be slightly off. --BenH
 	 */
-	if (!machine_is_compatible("MacRISC2") &&
-	    !machine_is_compatible("MacRISC3") &&
-	    !machine_is_compatible("MacRISC4"))
+	if (!of_machine_is_compatible("MacRISC2") &&
+	    !of_machine_is_compatible("MacRISC3") &&
+	    !of_machine_is_compatible("MacRISC4"))
 		if (via_calibrate_decr())
 			return;
 
@@ -328,7 +328,7 @@ void __init pmac_calibrate_decr(void)
 	 * probably implement calibration based on the KL timer on these
 	 * machines anyway... -BenH
 	 */
-	if (machine_is_compatible("PowerMac3,5"))
+	if (of_machine_is_compatible("PowerMac3,5"))
 		if (via_calibrate_decr())
 			return;
 #endif

+ 3 - 3
arch/powerpc/platforms/powermac/udbg_scc.c

@@ -132,9 +132,9 @@ void udbg_scc_init(int force_scc)
 		scc_inittab[1] = in_8(sccc);
 		out_8(sccc, 12);
 		scc_inittab[3] = in_8(sccc);
-	} else if (machine_is_compatible("RackMac1,1")
-		   || machine_is_compatible("RackMac1,2")
-		   || machine_is_compatible("MacRISC4")) {
+	} else if (of_machine_is_compatible("RackMac1,1")
+		   || of_machine_is_compatible("RackMac1,2")
+		   || of_machine_is_compatible("MacRISC4")) {
 		/* Xserves and G5s default to 57600 */
 		scc_inittab[1] = 0;
 		scc_inittab[3] = 0;

+ 2 - 2
arch/powerpc/sysdev/grackle.c

@@ -56,9 +56,9 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
 void __init setup_grackle(struct pci_controller *hose)
 {
 	setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
-	if (machine_is_compatible("PowerMac1,1"))
+	if (of_machine_is_compatible("PowerMac1,1"))
 		ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
-	if (machine_is_compatible("AAPL,PowerBook1998"))
+	if (of_machine_is_compatible("AAPL,PowerBook1998"))
 		grackle_set_loop_snoop(hose, 1);
 #if 0	/* Disabled for now, HW problems ??? */
 	grackle_set_stg(hose, 1);

+ 2 - 2
arch/sparc/include/asm/stat.h

@@ -53,8 +53,8 @@ struct stat {
 	ino_t		st_ino;
 	mode_t		st_mode;
 	short		st_nlink;
-	uid_t		st_uid;
-	gid_t		st_gid;
+	uid16_t		st_uid;
+	gid16_t		st_gid;
 	unsigned short	st_rdev;
 	off_t		st_size;
 	time_t		st_atime;

+ 1 - 1
arch/sparc/kernel/devices.c

@@ -59,7 +59,7 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
 
 	cur_inst = 0;
 	for_each_node_by_type(dp, "cpu") {
-		int err = check_cpu_node(dp->node, &cur_inst,
+		int err = check_cpu_node(dp->phandle, &cur_inst,
 					 compare, compare_arg,
 					 prom_node, mid);
 		if (!err) {

+ 4 - 0
arch/sparc/kernel/kstack.h

@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
 {
 	unsigned long base = (unsigned long) tp;
 
+	/* Stack pointer must be 16-byte aligned.  */
+	if (sp & (16UL - 1))
+		return false;
+
 	if (sp >= (base + sizeof(struct thread_info)) &&
 	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
 		return true;

+ 2 - 2
arch/sparc/kernel/of_device_32.c

@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
 
 static int of_bus_ambapp_match(struct device_node *np)
 {
-	return !strcmp(np->name, "ambapp");
+	return !strcmp(np->type, "ambapp");
 }
 
 static void of_bus_ambapp_count_cells(struct device_node *child,
@@ -433,7 +433,7 @@ build_resources:
 	if (!parent)
 		dev_set_name(&op->dev, "root");
 	else
-		dev_set_name(&op->dev, "%08x", dp->node);
+		dev_set_name(&op->dev, "%08x", dp->phandle);
 
 	if (of_device_register(op)) {
 		printk("%s: Could not register of device.\n",

+ 1 - 1
arch/sparc/kernel/of_device_64.c

@@ -676,7 +676,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
 	if (!parent)
 		dev_set_name(&op->dev, "root");
 	else
-		dev_set_name(&op->dev, "%08x", dp->node);
+		dev_set_name(&op->dev, "%08x", dp->phandle);
 
 	if (of_device_register(op)) {
 		printk("%s: Could not register of device.\n",

+ 7 - 0
arch/sparc/kernel/pci.c

@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 					 struct pci_bus *bus, int devfn)
 {
 	struct dev_archdata *sd;
+	struct pci_slot *slot;
 	struct of_device *op;
 	struct pci_dev *dev;
 	const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	dev->dev.bus = &pci_bus_type;
 	dev->devfn = devfn;
 	dev->multifunction = 0;		/* maybe a lie? */
+	set_pcie_port_type(dev);
+
+	list_for_each_entry(slot, &dev->bus->slots, list)
+		if (PCI_SLOT(dev->devfn) == slot->number)
+			dev->slot = slot;
 
 	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
 	dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 
 	dev->current_state = 4;		/* unknown power state */
 	dev->error_state = pci_channel_io_normal;
+	dev->dma_mask = 0xffffffff;
 
 	if (!strcmp(node->name, "pci")) {
 		/* a PCI-PCI bridge */

+ 0 - 3
arch/sparc/kernel/prom.h

@@ -4,9 +4,6 @@
 #include <linux/spinlock.h>
 #include <asm/prom.h>
 
-extern struct device_node *allnodes;	/* temporary while merging */
-extern rwlock_t devtree_lock;	/* temporary while merging */
-
 extern void * prom_early_alloc(unsigned long size);
 extern void irq_trans_init(struct device_node *dp);
 

+ 3 - 15
arch/sparc/kernel/prom_common.c

@@ -37,18 +37,6 @@ EXPORT_SYMBOL(of_console_path);
 char *of_console_options;
 EXPORT_SYMBOL(of_console_options);
 
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-	struct device_node *np;
-
-	for (np = allnodes; np; np = np->allnext)
-		if (np->node == handle)
-			break;
-
-	return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
 int of_getintprop_default(struct device_node *np, const char *name, int def)
 {
 	struct property *prop;
@@ -89,7 +77,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
 			void *old_val = prop->value;
 			int ret;
 
-			ret = prom_setprop(dp->node, name, val, len);
+			ret = prom_setprop(dp->phandle, name, val, len);
 
 			err = -EINVAL;
 			if (ret >= 0) {
@@ -236,7 +224,7 @@ static struct device_node * __init prom_create_node(phandle node,
 
 	dp->name = get_one_property(node, "name");
 	dp->type = get_one_property(node, "device_type");
-	dp->node = node;
+	dp->phandle = node;
 
 	dp->properties = build_prop_list(node);
 
@@ -313,7 +301,7 @@ void __init prom_build_devicetree(void)
 
 	nextp = &allnodes->allnext;
 	allnodes->child = prom_build_tree(allnodes,
-					  prom_getchild(allnodes->node),
+					  prom_getchild(allnodes->phandle),
 					  &nextp);
 	of_console_init();
 

+ 1 - 1
arch/sparc/kernel/smp_64.c

@@ -370,7 +370,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 	} else {
 		struct device_node *dp = of_find_node_by_cpuid(cpu);
 
-		prom_startcpu(dp->node, entry, cookie);
+		prom_startcpu(dp->phandle, entry, cookie);
 	}
 
 	for (timeout = 0; timeout < 50000; timeout++) {

+ 4 - 2
arch/sparc/kernel/tsb.S

@@ -191,10 +191,12 @@ tsb_dtlb_load:
 
 tsb_itlb_load:
 	/* Executable bit must be set.  */
-661:	andcc		%g5, _PAGE_EXEC_4U, %g0
-	.section	.sun4v_1insn_patch, "ax"
+661:	sethi		%hi(_PAGE_EXEC_4U), %g4
+	andcc		%g5, %g4, %g0
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	andcc		%g5, _PAGE_EXEC_4V, %g0
+	nop
 	.previous
 
 	be,pn		%xcc, tsb_do_fault

+ 2 - 0
arch/x86/include/asm/processor.h

@@ -450,6 +450,8 @@ struct thread_struct {
 	struct perf_event	*ptrace_bps[HBP_NUM];
 	/* Debug status used for traps, single steps, etc... */
 	unsigned long           debugreg6;
+	/* Keep track of the exact dr7 value set by the user */
+	unsigned long           ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
 	unsigned long		trap_no;

+ 0 - 8
arch/x86/kernel/acpi/boot.c

@@ -1342,14 +1342,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
 		     },
 	 },
-	{
-	 .callback = force_acpi_ht,
-	 .ident = "ASUS P2B-DS",
-	 .matches = {
-		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-		     DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-		     },
-	 },
 	{
 	 .callback = force_acpi_ht,
 	 .ident = "ASUS CUR-DLS",

+ 7 - 23
arch/x86/kernel/hw_breakpoint.c

@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
 	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
 
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-				kallsyms_lookup_name(info->name);
-	if (info->address)
-		return 0;
-
-	return -EINVAL;
-}
-
 int arch_bp_generic_fields(int x86_len, int x86_type,
 			   int *gen_len, int *gen_type)
 {
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 		return ret;
 	}
 
-	ret = arch_store_info(bp);
-
-	if (ret < 0)
-		return ret;
+	/*
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)
+				kallsyms_lookup_name(info->name);
 	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.

+ 5 - 2
arch/x86/kernel/ptrace.c

@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 	} else if (n == 6) {
 		val = thread->debugreg6;
 	 } else if (n == 7) {
-		val = ptrace_get_dr7(thread->ptrace_bps);
+		val = thread->ptrace_dr7;
 	}
 	return val;
 }
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
 			return rc;
 	}
 	/* All that's left is DR7 */
-	if (n == 7)
+	if (n == 7) {
 		rc = ptrace_write_dr7(tsk, val);
+		if (!rc)
+			thread->ptrace_dr7 = val;
+	}
 
 ret_path:
 	return rc;
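The user-visible effect of caching ptrace_dr7 above is that reading debug register 7 through ptrace now returns exactly what the tracer last wrote (when the write was accepted), rather than a value reconstructed from the installed breakpoints. A hedged userspace sketch, assuming a child already stopped in ptrace-stop; the function name and the chosen dr7 value are illustrative only:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>

    /* Illustration only: write a DR7 value and read it back. */
    static void poke_and_peek_dr7(pid_t child, unsigned long dr7)
    {
    	unsigned long off = offsetof(struct user, u_debugreg[7]);

    	if (ptrace(PTRACE_POKEUSER, child, (void *)off, (void *)dr7) == 0)
    		printf("dr7 readback: %#lx\n",
    		       (unsigned long)ptrace(PTRACE_PEEKUSER, child,
    					     (void *)off, NULL));
    }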

+ 2 - 9
block/blk-core.c

@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
 		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }
 
 /**

+ 1 - 0
drivers/acpi/dock.c

@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
 	struct platform_device *dd;
 
 	id = dock_station_count;
+	memset(&ds, 0, sizeof(ds));
 	dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
 	if (IS_ERR(dd))
 		return PTR_ERR(dd);

+ 24 - 12
drivers/acpi/processor_idle.c

@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
 	 (void *)2},
+	{ set_max_cstate, "Pavilion zv5000", {
+	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+	 (void *)1},
+	{ set_max_cstate, "Asus L8400B", {
+	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+	 (void *)1},
 	{},
 };
 
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		return(acpi_idle_enter_c1(dev, state));
 
 	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}
 
 	if (unlikely(need_resched())) {
 		current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	}
 
 	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}
 
 	if (unlikely(need_resched())) {
 		current_thread_info()->status |= TS_POLLING;

+ 14 - 0
drivers/acpi/processor_pdc.c

@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
 	return status;
 }
 
+static int early_pdc_done;
+
 void acpi_processor_set_pdc(acpi_handle handle)
 {
 	struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
 	if (arch_has_acpi_pdc() == false)
 		return;
 
+	if (early_pdc_done)
+		return;
+
 	obj_list = acpi_processor_alloc_pdc();
 	if (!obj_list)
 		return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
 	return 0;
 }
 
+static int param_early_pdc_optin(char *s)
+{
+	early_pdc_optin = 1;
+	return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
 	{
 	set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    early_init_pdc, NULL, NULL, NULL);
+
+	early_pdc_done = 1;
 }
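Usage note for the hunk above: with the new __setup() hook, early _PDC evaluation can also be requested from the boot command line — for example by appending acpi_early_pdc_eval — instead of relying solely on the early_pdc_optin_table DMI matches, and the early_pdc_done flag then keeps acpi_processor_set_pdc() from evaluating _PDC a second time later in boot.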

+ 5 - 1
drivers/acpi/processor_perflib.c

@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
 	if (result)
 		goto update_bios;
 
-	return 0;
+	/* We need to call _PPC once when cpufreq starts */
+	if (ignore_ppc != 1)
+		result = acpi_processor_get_platform_limit(pr);
+
+	return result;
 
 	/*
 	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that

+ 22 - 5
drivers/acpi/scan.c

@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 
 	if (child)
 		*child = device;
-	return 0;
+
+	if (device)
+		return 0;
+	else
+		return -ENODEV;
 }
 
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * scan a given ACPI tree and (probably recently hot-plugged)
+ * create and add or starts found devices.
+ *
+ * If no devices were found -ENODEV is returned which does not
+ * mean that this is a real error, there just have been no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
 int
 acpi_bus_add(struct acpi_device **child,
 	     struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_add = 1;
 
-	acpi_bus_scan(handle, &ops, child);
-	return 0;
+	return acpi_bus_scan(handle, &ops, child);
 }
 EXPORT_SYMBOL(acpi_bus_add);
 
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
 {
 	struct acpi_bus_ops ops;
 
+	if (!device)
+		return -EINVAL;
+
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_start = 1;
 
-	acpi_bus_scan(device->handle, &ops, NULL);
-	return 0;
+	return acpi_bus_scan(device->handle, &ops, NULL);
 }
 EXPORT_SYMBOL(acpi_bus_start);
 

+ 2 - 2
drivers/acpi/tables.c

@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
 	unsigned long table_end;
 	acpi_size tbl_size;
 
-	if (acpi_disabled)
+	if (acpi_disabled && !acpi_ht)
 		return -ENODEV;
 
 	if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 	struct acpi_table_header *table = NULL;
 	acpi_size tbl_size;
 
-	if (acpi_disabled)
+	if (acpi_disabled && !acpi_ht)
 		return -ENODEV;
 
 	if (!handler)

+ 10 - 2
drivers/ata/ahci.c

@@ -3082,8 +3082,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ahci_save_initial_config(pdev, hpriv);
 
 	/* prepare host */
-	if (hpriv->cap & HOST_CAP_NCQ)
-		pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
+	if (hpriv->cap & HOST_CAP_NCQ) {
+		pi.flags |= ATA_FLAG_NCQ;
+		/* Auto-activate optimization is supposed to be supported on
+		   all AHCI controllers indicating NCQ support, but it seems
+		   to be broken at least on some NVIDIA MCP79 chipsets.
+		   Until we get info on which NVIDIA chipsets don't have this
+		   issue, if any, disable AA on all NVIDIA AHCIs. */
+		if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+			pi.flags |= ATA_FLAG_FPDMA_AA;
+	}
 
 	if (hpriv->cap & HOST_CAP_PMP)
 		pi.flags |= ATA_FLAG_PMP;

+ 2 - 0
drivers/base/class.c

@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
 	else
 		pr_debug("class '%s' does not have a release() function, "
 			 "be careful\n", class->name);
+
+	kfree(cp);
 }
 
 static struct sysfs_ops class_sysfs_ops = {

+ 46 - 15
drivers/block/virtio_blk.c

@@ -243,10 +243,12 @@ static int index_to_minor(int index)
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
+	struct request_queue *q;
 	int err;
 	u64 cap;
-	u32 v;
-	u32 blk_size, sg_elems;
+	u32 v, blk_size, sg_elems, opt_io_size;
+	u16 min_io_size;
+	u8 physical_block_exp, alignment_offset;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_mempool;
 	}
 
-	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
-	if (!vblk->disk->queue) {
+	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
-	vblk->disk->queue->queuedata = vblk;
+	q->queuedata = vblk;
 
 	if (index < 26) {
 		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 
 	/* If barriers are supported, tell block layer that queue is ordered */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
 				  virtblk_prepare_flush);
 	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
-		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
 
 	/* If disk is read-only in the host, the guest should obey */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
-	blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(vblk->disk->queue, -1U);
+	blk_queue_max_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 				offsetof(struct virtio_blk_config, size_max),
 				&v);
 	if (!err)
-		blk_queue_max_segment_size(vblk->disk->queue, v);
+		blk_queue_max_segment_size(q, v);
 	else
-		blk_queue_max_segment_size(vblk->disk->queue, -1U);
+		blk_queue_max_segment_size(q, -1U);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
 				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(q, blk_size);
+	else
+		blk_size = queue_logical_block_size(q);
+
+	/* Use topology information if available */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, physical_block_exp),
+			&physical_block_exp);
+	if (!err && physical_block_exp)
+		blk_queue_physical_block_size(q,
+				blk_size * (1 << physical_block_exp));
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, alignment_offset),
+			&alignment_offset);
+	if (!err && alignment_offset)
+		blk_queue_alignment_offset(q, blk_size * alignment_offset);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, min_io_size),
+			&min_io_size);
+	if (!err && min_io_size)
+		blk_queue_io_min(q, blk_size * min_io_size);
+
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+			offsetof(struct virtio_blk_config, opt_io_size),
+			&opt_io_size);
+	if (!err && opt_io_size)
+		blk_queue_io_opt(q, blk_size * opt_io_size);
+
 
 	add_disk(vblk->disk);
 	return 0;
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
 };
 
 /*

+ 8 - 0
drivers/char/Kconfig

@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE
 	help
 	  Virtio console for use with lguest and other hypervisors.
 
+	  Also serves as a general-purpose serial device for data
+	  transfer between the guest and host.  Character devices at
+	  /dev/vportNpn will be created when corresponding ports are
+	  found, where N is the device number and n is the port number
+	  within that device.  If specified by the host, a sysfs
+	  attribute called 'name' will be populated with a name for
+	  the port which can be used by udev scripts to create a
+	  symlink to the device.
 
 config HVCS
 	tristate "IBM Hypervisor Virtual Console Server support"

+ 1 - 1
drivers/char/hvc_beat.c

@@ -99,7 +99,7 @@ static int hvc_beat_config(char *p)
 
 static int __init hvc_beat_console_init(void)
 {
-	if (hvc_beat_useit && machine_is_compatible("Beat")) {
+	if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
 		hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
 	}
 	return 0;

File diff suppressed because it is too large
+ 1403 - 145
drivers/char/virtio_console.c


+ 1 - 1
drivers/clocksource/cs5535-clockevt.c

@@ -21,7 +21,7 @@
 
 #define DRV_NAME "cs5535-clockevt"
 
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
 

+ 45 - 2
drivers/gpu/drm/drm_edid.c

@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
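A worked example of the interlace quirk above: a CEA 1080i detailed timing encoded in field height arrives as 1920x540. Because (1920, 1080) is in the cea_interlaced list and 540 == 1080 / 2, vdisplay, vsync_start, vsync_end and vtotal are doubled and vtotal is forced odd, so the mode is exposed as a 1920x1080 interlaced mode instead of a bogus 540-line progressive one.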

+ 9 - 21
drivers/gpu/drm/i915/i915_drv.c

@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int i915_drm_freeze(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	return 0;
-}
-
-static void i915_drm_suspend(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	intel_opregion_free(dev, 1);
 
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
+
+	return 0;
 }
 
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (error)
 		return error;
 
-	i915_drm_suspend(dev);
-
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
-
 	return i915_drm_thaw(dev);
 }
 
@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
 	if (error)
 		return error;
 
-	i915_drm_suspend(drm_dev);
-
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 
@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int error;
-
-	error = i915_drm_freeze(drm_dev);
-	if (!error)
-		i915_drm_suspend(drm_dev);
 
-	return error;
+	return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {

+ 7 - 0
drivers/gpu/drm/i915/intel_lvds.c

@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
 		},
 	},
+	{
+		.ident = "Clevo M5x0N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+			DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+		},
+	},
 	{ }
 };
 

+ 3 - 4
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_bios.h

@@ -205,7 +205,7 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -583,6 +583,7 @@ struct drm_nouveau_private {
 	uint64_t vm_end;
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;

+ 72 - 41
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			uint32_t flags, uint64_t phys)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
 
-	pages = size >> psz;
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
+
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
 
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+		phys += block << 15;
+		size -= block;
 
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
 
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt  += (end - pte);
 
-			phys += (1 << psz);
-			virt += (1 << psz);
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
 		}
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt  += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*

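A rough standalone sketch of the block-size selection in the rewritten nv50_mem_vm_bind_linear() loop above: each iteration picks the largest power-of-two run of PTE pairs (up to 256) that both fits the remaining size and is aligned to the current offset, and the chosen order i is then folded into the low PTE word as i << 7. The input values below are invented for the demonstration.

#include <stdio.h>

/* pick the largest power-of-two block (2..256 PTE pairs) that fits the
 * remaining size and is aligned to the current offset, as the for-loop
 * in the diff does */
static unsigned pick_block(unsigned virt, unsigned size)
{
	unsigned block = 0;
	int i;

	for (i = 7; i >= 0; i--) {
		block = 1u << (i + 1);
		if (size >= block && !(virt & (block - 1)))
			break;
	}
	return block;
}

int main(void)
{
	/* example values only: both are even, in units of PTE pairs */
	unsigned virt = 6, size = 20;

	while (size) {
		unsigned block = pick_block(virt, size);

		printf("virt=%3u size=%3u -> block=%u\n", virt, size, block);
		virt += block;
		size -= block;
	}
	return 0;
}
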
+ 5 - 1
drivers/gpu/drm/nouveau/nv04_dac.c

@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 		/* only implemented for head A for now */
 		NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");

+ 2 - 0
drivers/gpu/drm/nouveau/nv17_tv.c

@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 				nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,

+ 40 - 18
drivers/gpu/drm/nouveau/nv50_instmem.c

@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
-							0, &priv->pramin_pt);
+				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 

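The instmem hunks above switch PTE offsets to word indices and write each entry as a 64-bit address split across two consecutive 32-bit words. A small sketch of that encoding, with invented example values; the |1 (presumably the valid bit) and the vram_sys_base + |0x30 adjustment for the 0xaa/0xac integrated chipsets are taken from the diff itself.

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t vram_sys_base = 0x200000000ull;	/* example value only */
	uint64_t vram = 0x12345000ull;			/* example backing page */

	vram |= 1;					/* mark the entry valid */
	if (vram_sys_base) {
		vram += vram_sys_base;			/* address relative to the stolen-memory base */
		vram |= 0x30;
	}

	/* these two words are what nv_wo32() writes into consecutive PTE slots */
	printf("low  word: 0x%08x\n", lower_32_bits(vram));
	printf("high word: 0x%08x\n", upper_32_bits(vram));
	return 0;
}
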
+ 1 - 1
drivers/gpu/drm/radeon/atom.c

@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
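
The delay change above stops converting microsecond counts to jiffies: at tick granularity even a few microseconds end up sleeping for at least one whole tick, so a short busy-wait (udelay) is the lesser evil. A tiny arithmetic sketch, with HZ chosen as an example value only:

#include <stdio.h>

#define HZ 250	/* example tick rate; real kernels use 100..1000 */

/* round up to at least one tick, which is what a usecs-to-jiffies
 * conversion effectively does for any non-zero delay */
static unsigned usecs_to_ticks(unsigned us)
{
	return (us * HZ + 999999) / 1000000;
}

int main(void)
{
	unsigned us = 10;	/* ATOM asked for a 10 us delay */
	unsigned ticks = usecs_to_ticks(us);

	printf("%u us -> %u tick(s), i.e. at least %u us of sleep\n",
	       us, ticks, ticks * (1000000 / HZ));
	return 0;
}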

+ 0 - 3
drivers/gpu/drm/radeon/r600_blit_kms.c

@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 

+ 6 - 3
drivers/gpu/drm/radeon/r600_cp.c

@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
 
 	gb_tiling_config |= R600_BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
-							dev_priv->r600_max_backends,
-							(0xff << dev_priv->r600_max_backends) & 0xff);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+								dev_priv->r600_max_backends,
+								(0xff << dev_priv->r600_max_backends) & 0xff);
 	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =

+ 5 - 4
drivers/gpu/drm/radeon/radeon.h

@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
-	uint32_t	*ptr;
+	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {

+ 9 - 0
drivers/gpu/drm/radeon/radeon_atombios.c

@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x1849) &&
+	    (dev->pdev->subsystem_device == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&

+ 22 - 22
drivers/gpu/drm/radeon/radeon_combios.c

@@ -1279,47 +1279,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 	rdev->mode_info.connector_table = radeon_connector_table;
 	if (rdev->mode_info.connector_table == CT_NONE) {
 #ifdef CONFIG_PPC_PMAC
-		if (machine_is_compatible("PowerBook3,3")) {
+		if (of_machine_is_compatible("PowerBook3,3")) {
 			/* powerbook with VGA */
 			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
-		} else if (machine_is_compatible("PowerBook3,4") ||
-			   machine_is_compatible("PowerBook3,5")) {
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
 			/* powerbook with internal tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
-		} else if (machine_is_compatible("PowerBook5,1") ||
-			   machine_is_compatible("PowerBook5,2") ||
-			   machine_is_compatible("PowerBook5,3") ||
-			   machine_is_compatible("PowerBook5,4") ||
-			   machine_is_compatible("PowerBook5,5")) {
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
 			/* powerbook with external single link tmds (sil164) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,6")) {
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
 			/* powerbook with external dual or single link tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,7") ||
-			   machine_is_compatible("PowerBook5,8") ||
-			   machine_is_compatible("PowerBook5,9")) {
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
 			/* PowerBook6,2 ? */
 			/* powerbook with external dual link tmds (sil1178?) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook4,1") ||
-			   machine_is_compatible("PowerBook4,2") ||
-			   machine_is_compatible("PowerBook4,3") ||
-			   machine_is_compatible("PowerBook6,3") ||
-			   machine_is_compatible("PowerBook6,5") ||
-			   machine_is_compatible("PowerBook6,7")) {
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
 			/* ibook */
 			rdev->mode_info.connector_table = CT_IBOOK;
-		} else if (machine_is_compatible("PowerMac4,4")) {
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
 			/* emac */
 			rdev->mode_info.connector_table = CT_EMAC;
-		} else if (machine_is_compatible("PowerMac10,1")) {
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
 			/* mini with internal tmds */
 			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
-		} else if (machine_is_compatible("PowerMac10,2")) {
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
 			/* mini with external tmds */
 			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
-		} else if (machine_is_compatible("PowerMac12,1")) {
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
 			/* PowerMac8,1 ? */
 			/* imac g5 isight */
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;

+ 2 - 3
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 			 * connected and the DVI port disconnected.  If the edid doesn't
 			 * say HDMI, vice versa.
 			 */
-			if (radeon_connector->shared_ddc && connector_status_connected) {
+			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
 				struct drm_device *dev = connector->dev;
 				struct drm_connector *list_connector;
 				struct radeon_connector *list_radeon_connector;
@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			return;
 		}
 		if (radeon_connector->ddc_bus && i2c_bus->valid) {
-			if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
-				    sizeof(struct radeon_i2c_bus_rec)) == 0) {
+			if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
 				radeon_connector->shared_ddc = true;
 				shared_ddc = true;
 			}

+ 4 - 6
drivers/gpu/drm/radeon/radeon_cs.c

@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 						&p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-						parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);

+ 2 - 1
drivers/gpu/drm/radeon/radeon_drv.h

@@ -106,9 +106,10 @@
  * 1.29- R500 3D cmd buffer support
  * 1.30- Add support for occlusion queries
  * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		31
+#define DRIVER_MINOR		32
 #define DRIVER_PATCHLEVEL	0
 
 enum radeon_cp_microcode_version {

+ 15 - 21
drivers/gpu/drm/radeon/radeon_object.c

@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
-
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
+
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,

+ 2 - 2
drivers/gpu/drm/radeon/radeon_object.h

@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,

+ 42 - 65
drivers/gpu/drm/radeon/radeon_ring.c

@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
 				true, RADEON_GEM_DOMAIN_GTT,
 				&rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {

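The radeon_ring.c rewrite above replaces the allocation bitmap and the scheduled-IB list with a simple ring scan: start at head_id, take the first free slot, advance head_id past it, and only wait on a fence if the chosen slot is still in flight. A userspace sketch of that scan, assuming the pool size is a power of two (as the new comment in radeon.h requires); names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16	/* must be a power of two for the & (POOL_SIZE - 1) wrap */

struct slot {
	unsigned idx;
	bool free;
};

static struct slot pool[POOL_SIZE];
static unsigned head_id;

static struct slot *alloc_slot(void)
{
	unsigned i, c;

	for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;
		if (pool[i].free) {
			pool[i].free = false;
			head_id = (pool[i].idx + 1) & (POOL_SIZE - 1);
			return &pool[i];
		}
	}
	return NULL;	/* every slot busy: the driver returns -EBUSY here */
}

int main(void)
{
	unsigned n;

	for (n = 0; n < POOL_SIZE; n++) {
		pool[n].idx = n;
		pool[n].free = true;
	}

	for (n = 0; n < 3; n++) {
		struct slot *s = alloc_slot();

		if (s)
			printf("allocated slot %u, next scan starts at %u\n",
			       s->idx, head_id);
	}
	return 0;
}
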
+ 6 - 3
drivers/gpu/drm/radeon/rv770.c

@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 	gb_tiling_config |= BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-							rdev->config.rv770.max_backends,
-							(0xff << rdev->config.rv770.max_backends) & 0xff);
+	if (rdev->family == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+								rdev->config.rv770.max_backends,
+								(0xff << rdev->config.rv770.max_backends) & 0xff);
 	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =

+ 11 - 7
drivers/gpu/drm/ttm/ttm_tt.c

@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	int ret = 0;
 
 	if (PageHighMem(p))
 		return 0;
 
-	if (get_page_memtype(p) != -1) {
+	if (c_old != tt_cached) {
 		/* p isn't in the default caching state, set it to
 		 * writeback first to free its current memtype. */
 
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 			return ret;
 	}
 
-	if (c_state == tt_wc)
+	if (c_new == tt_wc)
 		ret = set_memory_wc((unsigned long) page_address(p), 1);
-	else if (c_state == tt_uncached)
+	else if (c_new == tt_uncached)
 		ret = set_pages_uc(p, 1);
 
 	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	return 0;
 }
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
 		if (likely(cur_page != NULL)) {
-			ret = ttm_tt_set_page_caching(cur_page, c_state);
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
 			if (unlikely(ret != 0))
 				goto out_err;
 		}
@@ -268,7 +272,7 @@ out_err:
 	for (j = 0; j < i; ++j) {
 		cur_page = ttm->pages[j];
 		if (likely(cur_page != NULL)) {
-			(void)ttm_tt_set_page_caching(cur_page,
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
 						      ttm->caching_state);
 		}
 	}
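
The ttm_tt change above passes both the old and the new caching state into the per-page helper instead of querying the page's memtype, so the "set writeback first" step only happens when the page really left the default cached state. A small state-transition sketch, with print statements standing in for the real set_memory_*() calls:

#include <stdio.h>

enum caching_state { tt_cached, tt_wc, tt_uncached };

static const char *name(enum caching_state c)
{
	return c == tt_cached ? "cached" : c == tt_wc ? "wc" : "uncached";
}

static void set_page_caching(enum caching_state c_old, enum caching_state c_new)
{
	/* only fall back to writeback when the page is not already in the
	 * default cached state, mirroring the new c_old != tt_cached test */
	if (c_old != tt_cached)
		printf("  writeback first (was %s)\n", name(c_old));

	if (c_new == tt_wc)
		printf("  -> write-combined\n");
	else if (c_new == tt_uncached)
		printf("  -> uncached\n");
}

int main(void)
{
	printf("cached -> wc:\n");
	set_page_caching(tt_cached, tt_wc);

	printf("wc -> uncached:\n");
	set_page_caching(tt_wc, tt_uncached);
	return 0;
}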

+ 16 - 33
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
+
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
 
 

+ 92 - 16
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-		       struct vmw_sw_context *sw_context,
-		       SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
 {
-	uint32_t handle;
 	struct vmw_dma_buffer *vmw_bo = NULL;
 	struct ttm_buffer_object *bo;
-	struct vmw_surface *srf = NULL;
-	struct vmw_dma_cmd {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSurfaceDMA dma;
-	} *cmd;
+	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
-	int ret;
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
+	int ret;
 
-	cmd = container_of(header, struct vmw_dma_cmd, header);
-	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number of DMA commands per submission"
+		DRM_ERROR("Max number relocations per submission"
 			  " exceeded\n");
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = &cmd->dma.guest.ptr;
+	reloc->location = ptr;
 
 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
 	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
 
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = &vmw_bo->base;
 	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 					     cmd->dma.host.sid, &srf);
 	if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 		    &vmw_cmd_blt_surf_screen_check)
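
The execbuf change above factors the guest-pointer lookup and relocation bookkeeping out of the DMA handler into vmw_translate_guest_ptr(), so the new END_QUERY and WAIT_FOR_QUERY validators can reuse it. A toy sketch of that shape, with every name invented for the example:

#include <stdio.h>

/* shared helper: resolves a guest handle and records a relocation */
static int translate_guest_ptr(unsigned handle)
{
	if (handle == 0) {
		fprintf(stderr, "could not find or use the region\n");
		return -1;
	}
	printf("relocation recorded for handle %u\n", handle);
	return 0;
}

static int cmd_dma(unsigned handle)        { return translate_guest_ptr(handle); }
static int cmd_end_query(unsigned handle)  { return translate_guest_ptr(handle); }
static int cmd_wait_query(unsigned handle) { return translate_guest_ptr(handle); }

int main(void)
{
	cmd_dma(3);
	cmd_end_query(4);
	cmd_wait_query(0);	/* fails like an unknown handle would */
	return 0;
}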

+ 3 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c

@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */

+ 1 - 1
drivers/gpu/vga/vgaarb.c

@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 		remaining -= 7;
 		pr_devel("client 0x%p called 'target'\n", priv);
 		/* if target is default */
-		if (!strncmp(kbuf, "default", 7))
+		if (!strncmp(curr_pos, "default", 7))
 			pdev = pci_dev_get(vga_default_device());
 		else {
 			if (!vga_pci_str_to_vars(curr_pos, remaining,

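The vgaarb fix above compares the cursor into the command buffer (curr_pos) rather than the buffer start (kbuf), which still points at "target ...". A minimal demonstration of why the old comparison could never match:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char kbuf[] = "target default";	/* example client command */
	char *curr_pos = kbuf;

	if (!strncmp(curr_pos, "target ", 7))
		curr_pos += 7;		/* cursor now points at "default" */

	printf("strncmp(kbuf,     \"default\", 7) = %d\n",
	       strncmp(kbuf, "default", 7));		/* non-zero: compares "target " */
	printf("strncmp(curr_pos, \"default\", 7) = %d\n",
	       strncmp(curr_pos, "default", 7));	/* zero: the fixed check matches */
	return 0;
}
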
+ 50 - 4
drivers/hid/Kconfig

@@ -55,6 +55,12 @@ source "drivers/hid/usbhid/Kconfig"
 menu "Special HID drivers"
 	depends on HID
 
+config HID_3M_PCT
+	tristate "3M PCT"
+	depends on USB_HID
+	---help---
+	Support for 3M PCT touch screens.
+
 config HID_A4TECH
 	tristate "A4 tech" if EMBEDDED
 	depends on USB_HID
@@ -183,6 +189,23 @@ config LOGIRUMBLEPAD2_FF
 	  Say Y here if you want to enable force feedback support for Logitech
 	  Rumblepad 2 devices.
 
+config LOGIG940_FF
+	bool "Logitech Flight System G940 force feedback support"
+	depends on HID_LOGITECH
+	select INPUT_FF_MEMLESS
+	help
+	  Say Y here if you want to enable force feedback support for Logitech
+	  Flight System G940 devices.
+
+config HID_MAGICMOUSE
+	tristate "Apple MagicMouse multi-touch support"
+	depends on BT_HIDP
+	---help---
+	Support for the Apple Magic Mouse multi-touch.
+
+	Say Y here if you want support for the multi-touch features of the
+	Apple Wireless "Magic" Mouse.
+
 config HID_MICROSOFT
 	tristate "Microsoft" if EMBEDDED
 	depends on USB_HID
@@ -190,6 +213,12 @@ config HID_MICROSOFT
 	---help---
 	Support for Microsoft devices that are not fully compliant with HID standard.
 
+config HID_MOSART
+	tristate "MosArt"
+	depends on USB_HID
+	---help---
+	Support for MosArt dual-touch panels.
+
 config HID_MONTEREY
 	tristate "Monterey" if EMBEDDED
 	depends on USB_HID
@@ -198,12 +227,18 @@ config HID_MONTEREY
 	Support for Monterey Genius KB29E.
 
 config HID_NTRIG
-	tristate "NTrig" if EMBEDDED
+	tristate "NTrig"
 	depends on USB_HID
-	default !EMBEDDED
 	---help---
 	Support for N-Trig touch screen.
 
+config HID_ORTEK
+	tristate "Ortek" if EMBEDDED
+	depends on USB_HID
+	default !EMBEDDED
+	---help---
+	Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
+
 config HID_PANTHERLORD
 	tristate "Pantherlord support" if EMBEDDED
 	depends on USB_HID
@@ -227,6 +262,12 @@ config HID_PETALYNX
 	---help---
 	Support for Petalynx Maxter remote control.
 
+config HID_QUANTA
+	tristate "Quanta Optical Touch"
+	depends on USB_HID
+	---help---
+	Support for Quanta Optical Touch dual-touch panels.
+
 config HID_SAMSUNG
 	tristate "Samsung" if EMBEDDED
 	depends on USB_HID
@@ -241,6 +282,12 @@ config HID_SONY
 	---help---
 	Support for Sony PS3 controller.
 
+config HID_STANTUM
+	tristate "Stantum"
+	depends on USB_HID
+	---help---
+	Support for Stantum multitouch panel.
+
 config HID_SUNPLUS
 	tristate "Sunplus" if EMBEDDED
 	depends on USB_HID
@@ -305,9 +352,8 @@ config THRUSTMASTER_FF
 	  Rumble Force or Force Feedback Wheel.
 
 config HID_WACOM
-	tristate "Wacom Bluetooth devices support" if EMBEDDED
+	tristate "Wacom Bluetooth devices support"
 	depends on BT_HIDP
-	default !EMBEDDED
 	---help---
 	Support for Wacom Graphire Bluetooth tablet.
 

Some files were not shown because too many files changed in this diff