
Merge branches 'x86/mce' and 'x86/urgent' into perf/mce

Merge reason: Put all MCE changes into this branch, we are
              queueing up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 15 years ago
parent commit 6b50f5c7c7
100 changed files with 1002 additions and 661 deletions
  1. 2 2
      Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
  2. 4 4
      Documentation/debugging-via-ohci1394.txt
  3. 30 0
      Documentation/feature-removal-schedule.txt
  4. 12 4
      Documentation/filesystems/ext3.txt
  5. 32 11
      Documentation/flexible-arrays.txt
  6. 1 0
      Documentation/sound/alsa/HD-Audio-Models.txt
  7. 21 0
      MAINTAINERS
  8. 3 45
      Makefile
  9. 0 1
      arch/arm/configs/omap3_beagle_defconfig
  10. 3 3
      arch/arm/include/asm/bitops.h
  11. 41 35
      arch/arm/kernel/traps.c
  12. 2 2
      arch/arm/mach-bcmring/core.c
  13. 1 1
      arch/arm/mach-bcmring/include/mach/system.h
  14. 35 9
      arch/arm/mach-ep93xx/Kconfig
  15. 9 0
      arch/arm/mach-ep93xx/Makefile.boot
  16. 119 47
      arch/arm/mach-ep93xx/clock.c
  17. 23 8
      arch/arm/mach-ep93xx/core.c
  18. 24 7
      arch/arm/mach-ep93xx/edb93xx.c
  19. 22 20
      arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
  20. 4 12
      arch/arm/mach-ep93xx/include/mach/gpio.h
  21. 6 0
      arch/arm/mach-ep93xx/include/mach/memory.h
  22. 3 1
      arch/arm/mach-ep93xx/include/mach/platform.h
  23. 77 55
      arch/arm/mach-ep93xx/micro9.c
  24. 1 1
      arch/arm/mach-omap2/board-rx51-peripherals.c
  25. 3 1
      arch/arm/mach-omap2/board-zoom2.c
  26. 1 0
      arch/arm/mach-omap2/clock24xx.c
  27. 44 30
      arch/arm/mach-omap2/clockdomain.c
  28. 1 1
      arch/arm/mach-pxa/cpufreq-pxa2xx.c
  29. 1 1
      arch/arm/mach-pxa/csb726.c
  30. 1 0
      arch/arm/mach-sa1100/Makefile
  31. 18 2
      arch/arm/mm/cache-v6.S
  32. 17 2
      arch/arm/mm/cache-v7.S
  33. 3 6
      arch/arm/mm/fault-armv.c
  34. 5 0
      arch/arm/mm/fault.c
  35. 2 0
      arch/arm/mm/highmem.c
  36. 1 1
      arch/arm/mm/init.c
  37. 9 6
      arch/arm/plat-omap/dma.c
  38. 1 1
      arch/arm/plat-omap/mcbsp.c
  39. 5 5
      arch/powerpc/include/asm/firmware.h
  40. 2 0
      arch/powerpc/kernel/cputable.c
  41. 1 2
      arch/powerpc/kernel/entry_64.S
  42. 0 6
      arch/powerpc/kernel/kgdb.c
  43. 1 1
      arch/powerpc/kernel/pci-common.c
  44. 7 3
      arch/powerpc/kernel/process.c
  45. 1 0
      arch/powerpc/kernel/vmlinux.lds.S
  46. 4 6
      arch/powerpc/mm/slb_low.S
  47. 1 1
      arch/powerpc/platforms/cell/axon_msi.c
  48. 5 2
      arch/powerpc/platforms/powermac/low_i2c.c
  49. 1 2
      arch/powerpc/platforms/pseries/firmware.c
  50. 1 1
      arch/s390/hypfs/hypfs_diag.c
  51. 3 3
      arch/s390/kernel/processor.c
  52. 1 1
      arch/sh/kernel/entry-common.S
  53. 27 10
      arch/sh/kernel/ftrace.c
  54. 2 0
      arch/sh/kernel/setup.c
  55. 4 5
      arch/sh/kernel/signal_32.c
  56. 2 0
      arch/sh/kernel/smp.c
  57. 4 3
      arch/sh/kernel/traps_32.c
  58. 1 1
      arch/sh/mm/cache.c
  59. 2 2
      arch/sparc/kernel/ldc.c
  60. 1 1
      arch/sparc/kernel/perf_event.c
  61. 1 1
      arch/sparc/mm/init_64.c
  62. 10 1
      arch/x86/Kconfig
  63. 3 0
      arch/x86/boot/setup.ld
  64. 2 1
      arch/x86/include/asm/mce.h
  65. 4 24
      arch/x86/include/asm/paravirt.h
  66. 6 4
      arch/x86/include/asm/paravirt_types.h
  67. 3 0
      arch/x86/kernel/acpi/realmode/wakeup.lds.S
  68. 4 4
      arch/x86/kernel/apic/x2apic_uv_x.c
  69. 22 10
      arch/x86/kernel/cpu/mcheck/mce.c
  70. 2 4
      arch/x86/kernel/irq.c
  71. 1 1
      arch/x86/kernel/pci-dma.c
  72. 0 1
      arch/x86/kernel/smp.c
  73. 2 1
      arch/x86/kernel/time.c
  74. 2 2
      arch/x86/kernel/tlb_uv.c
  75. 10 2
      arch/x86/kernel/trampoline.c
  76. 4 0
      arch/x86/kernel/trampoline_64.S
  77. 1 1
      arch/x86/kernel/vmi_32.c
  78. 3 0
      arch/x86/kernel/vmlinux.lds.S
  79. 4 12
      block/blk-core.c
  80. 1 1
      block/blk-merge.c
  81. 1 1
      block/blk-settings.c
  82. 1 1
      block/blk-tag.c
  83. 142 117
      block/cfq-iosched.c
  84. 1 3
      block/elevator.c
  85. 3 1
      block/genhd.c
  86. 2 2
      drivers/acpi/Kconfig
  87. 1 0
      drivers/acpi/ac.c
  88. 3 0
      drivers/acpi/button.c
  89. 11 0
      drivers/acpi/pci_root.c
  90. 6 1
      drivers/acpi/video.c
  91. 1 1
      drivers/acpi/video_detect.c
  92. 37 42
      drivers/block/cciss.c
  93. 1 0
      drivers/char/genrtc.c
  94. 1 0
      drivers/char/rtc.c
  95. 1 0
      drivers/char/sonypi.c
  96. 14 17
      drivers/char/tty_buffer.c
  97. 1 1
      drivers/char/vt_ioctl.c
  98. 12 9
      drivers/edac/edac_mce_amd.c
  99. 19 20
      drivers/firewire/sbp2.c
  100. 1 1
      drivers/hid/hid-core.c

+ 2 - 2
Documentation/ABI/testing/sysfs-class-usb_host → Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc

@@ -1,4 +1,4 @@
-What:           /sys/class/usb_host/usb_hostN/wusb_chid
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_chid
 Date:           July 2008
 KernelVersion:  2.6.27
 Contact:        David Vrabel <david.vrabel@csr.com>
@@ -9,7 +9,7 @@ Description:
 
                 Set an all zero CHID to stop the host controller.
 
-What:           /sys/class/usb_host/usb_hostN/wusb_trust_timeout
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_trust_timeout
 Date:           July 2008
 KernelVersion:  2.6.27
 Contact:        David Vrabel <david.vrabel@csr.com>

+ 4 - 4
Documentation/debugging-via-ohci1394.txt

@@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update.
 
 
 Bernhard Kaindl enhanced firescope to support accessing 64-bit machines
 Bernhard Kaindl enhanced firescope to support accessing 64-bit machines
 from 32-bit firescope and vice versa:
 from 32-bit firescope and vice versa:
-- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2
+- http://halobates.de/firewire/firescope-0.2.2.tar.bz2
 
 
 and he implemented fast system dump (alpha version - read README.txt):
 and he implemented fast system dump (alpha version - read README.txt):
-- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2
+- http://halobates.de/firewire/firedump-0.1.tar.bz2
 
 
 There is also a gdb proxy for firewire which allows to use gdb to access
 There is also a gdb proxy for firewire which allows to use gdb to access
 data which can be referenced from symbols found by gdb in vmlinux:
 data which can be referenced from symbols found by gdb in vmlinux:
-- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2
+- http://halobates.de/firewire/fireproxy-0.33.tar.bz2
 
 
 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not
 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not
 yet stable) with kgdb over an memory-based communication module (kgdbom).
 yet stable) with kgdb over an memory-based communication module (kgdbom).
@@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
 
 
 Notes
 Notes
 -----
 -----
-Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs
+Documentation and specifications: http://halobates.de/firewire/
 
 
 FireWire is a trademark of Apple Inc. - for more information please refer to:
 FireWire is a trademark of Apple Inc. - for more information please refer to:
 http://en.wikipedia.org/wiki/FireWire
 http://en.wikipedia.org/wiki/FireWire

+ 30 - 0
Documentation/feature-removal-schedule.txt

@@ -451,3 +451,33 @@ Why:	OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
 	will also allow making ALSA OSS emulation independent of
 	will also allow making ALSA OSS emulation independent of
 	sound_core.  The dependency will be broken then too.
 	sound_core.  The dependency will be broken then too.
 Who:	Tejun Heo <tj@kernel.org>
 Who:	Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:	Support for VMware's guest paravirtualization technique [VMI] will be
+	dropped.
+When:	2.6.37 or earlier.
+Why:	With the recent innovations in CPU hardware acceleration technologies
+	from Intel and AMD, VMware ran a few experiments to compare these
+	techniques to guest paravirtualization technique on VMware's platform.
+	These hardware assisted virtualization techniques have outperformed the
+	performance benefits provided by VMI in most of the workloads. VMware
+	expects that these hardware features will be ubiquitous in a couple of
+	years, as a result, VMware has started a phased retirement of this
+	feature from the hypervisor. We will be removing this feature from the
+	Kernel too. Right now we are targeting 2.6.37 but can retire earlier if
+	technical reasons (read opportunity to remove major chunk of pvops)
+	arise.
+
+	Please note that VMI has always been an optimization and non-VMI kernels
+	still work fine on VMware's platform.
+	Latest versions of VMware's product which support VMI are,
+	Workstation 7.0 and VSphere 4.0 on ESX side, future maintenance
+	releases for these products will continue supporting VMI.
+
+	For more details about VMI retirement take a look at this,
+	http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
+
+Who:	Alok N Kataria <akataria@vmware.com>
+
+----------------------------

+ 12 - 4
Documentation/filesystems/ext3.txt

@@ -123,10 +123,18 @@ resuid=n		The user ID which may use the reserved blocks.
 
 
 sb=n			Use alternate superblock at this location.
 sb=n			Use alternate superblock at this location.
 
 
-quota
-noquota
-grpquota
-usrquota
+quota			These options are ignored by the filesystem. They
+noquota			are used only by quota tools to recognize volumes
+grpquota		where quota should be turned on. See documentation
+usrquota		in the quota-tools package for more details
+			(http://sourceforge.net/projects/linuxquota).
+
+jqfmt=<quota type>	These options tell filesystem details about quota
+usrjquota=<file>	so that quota information can be properly updated
+grpjquota=<file>	during journal replay. They replace the above
+			quota options. See documentation in the quota-tools
+			package for more details
+			(http://sourceforge.net/projects/linuxquota).
 
 
 bh		(*)	ext3 associates buffer heads to data pages to
 bh		(*)	ext3 associates buffer heads to data pages to
 nobh			(a) cache disk block mapping information
 nobh			(a) cache disk block mapping information

+ 32 - 11
Documentation/flexible-arrays.txt

@@ -1,5 +1,5 @@
 Using flexible arrays in the kernel
 Using flexible arrays in the kernel
-Last updated for 2.6.31
+Last updated for 2.6.32
 Jonathan Corbet <corbet@lwn.net>
 Jonathan Corbet <corbet@lwn.net>
 
 
 Large contiguous memory allocations can be unreliable in the Linux kernel.
 Large contiguous memory allocations can be unreliable in the Linux kernel.
@@ -40,6 +40,13 @@ argument is passed directly to the internal memory allocation calls.  With
 the current code, using flags to ask for high memory is likely to lead to
 the current code, using flags to ask for high memory is likely to lead to
 notably unpleasant side effects.
 notably unpleasant side effects.
 
 
+It is also possible to define flexible arrays at compile time with:
+
+    DEFINE_FLEX_ARRAY(name, element_size, total);
+
+This macro will result in a definition of an array with the given name; the
+element size and total will be checked for validity at compile time.
+
 Storing data into a flexible array is accomplished with a call to:
 Storing data into a flexible array is accomplished with a call to:
 
 
     int flex_array_put(struct flex_array *array, unsigned int element_nr,
     int flex_array_put(struct flex_array *array, unsigned int element_nr,
@@ -76,16 +83,30 @@ particular element has never been allocated.
 Note that it is possible to get back a valid pointer for an element which
 Note that it is possible to get back a valid pointer for an element which
 has never been stored in the array.  Memory for array elements is allocated
 has never been stored in the array.  Memory for array elements is allocated
 one page at a time; a single allocation could provide memory for several
 one page at a time; a single allocation could provide memory for several
-adjacent elements.  The flexible array code does not know if a specific
-element has been written; it only knows if the associated memory is
-present.  So a flex_array_get() call on an element which was never stored
-in the array has the potential to return a pointer to random data.  If the
-caller does not have a separate way to know which elements were actually
-stored, it might be wise, at least, to add GFP_ZERO to the flags argument
-to ensure that all elements are zeroed.
-
-There is no way to remove a single element from the array.  It is possible,
-though, to remove all elements with a call to:
+adjacent elements.  Flexible array elements are normally initialized to the
+value FLEX_ARRAY_FREE (defined as 0x6c in <linux/poison.h>), so errors
+involving that number probably result from use of unstored array entries.
+Note that, if array elements are allocated with __GFP_ZERO, they will be
+initialized to zero and this poisoning will not happen.
+
+Individual elements in the array can be cleared with:
+
+    int flex_array_clear(struct flex_array *array, unsigned int element_nr);
+
+This function will set the given element to FLEX_ARRAY_FREE and return
+zero.  If storage for the indicated element is not allocated for the array,
+flex_array_clear() will return -EINVAL instead.  Note that clearing an
+element does not release the storage associated with it; to reduce the
+allocated size of an array, call:
+
+    int flex_array_shrink(struct flex_array *array);
+
+The return value will be the number of pages of memory actually freed.
+This function works by scanning the array for pages containing nothing but
+FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work
+if the array's pages are allocated with __GFP_ZERO.
+
+It is possible to remove all elements of an array with a call to:
 
 
     void flex_array_free_parts(struct flex_array *array);
     void flex_array_free_parts(struct flex_array *array);
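The interface described above can be exercised end to end; the following is a
minimal, hypothetical kernel-style sketch (not part of this commit), assuming
only the flex_array calls named in this document plus flex_array_alloc() and
flex_array_free() from <linux/flex_array.h>:

    #include <linux/flex_array.h>
    #include <linux/types.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Sketch only: store, fetch, clear and shrink a flex_array of u32s. */
    static int flex_array_demo(void)
    {
    	struct flex_array *fa;
    	u32 val = 42, *p;
    	int ret;

    	fa = flex_array_alloc(sizeof(u32), 128, GFP_KERNEL);
    	if (!fa)
    		return -ENOMEM;

    	ret = flex_array_put(fa, 10, &val, GFP_KERNEL);	/* element 10 = 42 */
    	if (!ret) {
    		p = flex_array_get(fa, 10);	/* pointer into the part page */
    		if (p && *p == 42)
    			flex_array_clear(fa, 10);	/* poison back to FLEX_ARRAY_FREE */
    		flex_array_shrink(fa);	/* free any pages that are now all-free */
    	}

    	flex_array_free(fa);	/* releases the parts and the flex_array itself */
    	return ret;
    }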
 
 

+ 1 - 0
Documentation/sound/alsa/HD-Audio-Models.txt

@@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
   5stack-no-fp	D965 5stack without front panel
   dell-3stack	Dell Dimension E520
   dell-bios	Fixes with Dell BIOS setup
+  volknob	Fixes with volume-knob widget 0x24
   auto		BIOS setup (default)
 
 STAC92HD71B*

+ 21 - 0
MAINTAINERS

@@ -577,6 +577,11 @@ M:	Mike Rapoport <mike@compulab.co.il>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 S:	Maintained
 
 
+ARM/CONTEC MICRO9 MACHINE SUPPORT
+M:	Hubert Feurstein <hubert.feurstein@contec.at>
+S:	Maintained
+F:	arch/arm/mach-ep93xx/micro9.c
+
 ARM/CORGI MACHINE SUPPORT
 ARM/CORGI MACHINE SUPPORT
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 S:	Maintained
@@ -2610,6 +2615,7 @@ L:	linux1394-devel@lists.sourceforge.net
 W:	http://www.linux1394.org/
 W:	http://www.linux1394.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S:	Maintained
 S:	Maintained
+F:	Documentation/debugging-via-ohci1394.txt
 F:	drivers/ieee1394/
 F:	drivers/ieee1394/
 
 
 IEEE 1394 RAW I/O DRIVER
 IEEE 1394 RAW I/O DRIVER
@@ -3661,6 +3667,7 @@ NETWORKING [GENERAL]
 M:	"David S. Miller" <davem@davemloft.net>
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 W:	http://www.linuxfoundation.org/en/Net
+W:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
 S:	Maintained
 S:	Maintained
 F:	net/
 F:	net/
@@ -4071,6 +4078,13 @@ M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Ingo Molnar <mingo@elte.hu>
 S:	Supported
 S:	Supported
+F:	kernel/perf_event.c
+F:	include/linux/perf_event.h
+F:	arch/*/*/kernel/perf_event.c
+F:	arch/*/include/asm/perf_event.h
+F:	arch/*/lib/perf_event.c
+F:	arch/*/kernel/perf_callchain.c
+F:	tools/perf/
 
 
 PERSONALITY HANDLING
 PERSONALITY HANDLING
 M:	Christoph Hellwig <hch@infradead.org>
 M:	Christoph Hellwig <hch@infradead.org>
@@ -5651,6 +5665,13 @@ S:	Maintained
 F:	drivers/vlynq/vlynq.c
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
 F:	include/linux/vlynq.h
 
 
+VMWARE VMXNET3 ETHERNET DRIVER
+M:     Shreyas Bhatewara <sbhatewara@vmware.com>
+M:     VMware, Inc. <pv-drivers@vmware.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/vmxnet3/
+
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>

+ 3 - 45
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 VERSION = 2
 PATCHLEVEL = 6
 PATCHLEVEL = 6
 SUBLEVEL = 32
 SUBLEVEL = 32
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Man-Eating Seals of Antiquity
 NAME = Man-Eating Seals of Antiquity
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*
@@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-#
-# To force ARCH and CROSS_COMPILE settings include kernel.* files
-# in the kernel tree - do not patch this file.
 export KBUILD_BUILDHOST := $(SUBARCH)
 export KBUILD_BUILDHOST := $(SUBARCH)
-
-# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
-# Restore these settings and check that user did not specify
-# conflicting values.
-
-saved_arch  := $(shell cat include/generated/kernel.arch  2> /dev/null)
-saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
-
-ifneq ($(CROSS_COMPILE),)
-        ifneq ($(saved_cross),)
-                ifneq ($(CROSS_COMPILE),$(saved_cross))
-                        $(error CROSS_COMPILE changed from \
-                                "$(saved_cross)" to \
-                                 to "$(CROSS_COMPILE)". \
-                                 Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-    CROSS_COMPILE := $(saved_cross)
-endif
-
-ifneq ($(ARCH),)
-        ifneq ($(saved_arch),)
-                ifneq ($(saved_arch),$(ARCH))
-                        $(error ARCH changed from \
-                                "$(saved_arch)" to "$(ARCH)". \
-                                 Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-        ifneq ($(saved_arch),)
-                ARCH := $(saved_arch)
-        else
-                ARCH := $(SUBARCH)
-        endif
-endif
+ARCH		?= $(SUBARCH)
+CROSS_COMPILE	?=
 
 
 # Architecture as present in compile.h
 # Architecture as present in compile.h
 UTS_MACHINE 	:= $(ARCH)
 UTS_MACHINE 	:= $(ARCH)
@@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
 include $(srctree)/arch/$(SRCARCH)/Makefile
 include $(srctree)/arch/$(SRCARCH)/Makefile
 export KBUILD_DEFCONFIG KBUILD_KCONFIG
 export KBUILD_DEFCONFIG KBUILD_KCONFIG
 
 
-# save ARCH & CROSS_COMPILE settings
-$(shell mkdir -p include/generated &&                            \
-        echo $(ARCH)          > include/generated/kernel.arch && \
-        echo $(CROSS_COMPILE) > include/generated/kernel.cross)
-
 config: scripts_basic outputmakefile FORCE
 config: scripts_basic outputmakefile FORCE
 	$(Q)mkdir -p include/linux include/config
 	$(Q)mkdir -p include/linux include/config
 	$(Q)$(MAKE) $(build)=scripts/kconfig $@
 	$(Q)$(MAKE) $(build)=scripts/kconfig $@

+ 0 - 1
arch/arm/configs/omap3_beagle_defconfig

@@ -969,7 +969,6 @@ CONFIG_USB_ETH_RNDIS=y
 #
 CONFIG_USB_OTG_UTILS=y
 # CONFIG_USB_GPIO_VBUS is not set
-# CONFIG_ISP1301_OMAP is not set
 CONFIG_TWL4030_USB=y
 # CONFIG_NOP_USB_XCEIV is not set
 CONFIG_MMC=y

+ 3 - 3
arch/arm/include/asm/bitops.h

@@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res | mask;
 	*p = res | mask;
 	raw_local_irq_restore(flags);
 	raw_local_irq_restore(flags);
 
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 }
 
 
 static inline int
 static inline int
@@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res & ~mask;
 	*p = res & ~mask;
 	raw_local_irq_restore(flags);
 	raw_local_irq_restore(flags);
 
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 }
 
 
 static inline int
 static inline int
@@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res ^ mask;
 	*p = res ^ mask;
 	raw_local_irq_restore(flags);
 	raw_local_irq_restore(flags);
 
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 }
 
 
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
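The three hunks above make the helpers return a strict 0 or 1 instead of the
raw masked word. A small standalone sketch (illustrative values only, not from
this commit) of how the two forms differ once the result is narrowed to an int:

    /* Sketch: returning (res & mask) vs. (res & mask) != 0 through an int. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long res = 0x80000000UL;	/* word with bit 31 set */
    	unsigned long mask = 1UL << 31;

    	int raw = res & mask;		/* out-of-range conversion: typically INT_MIN */
    	int norm = (res & mask) != 0;	/* always exactly 0 or 1 */

    	printf("raw=%d norm=%d\n", raw, norm);
    	return 0;
    }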

+ 41 - 35
arch/arm/kernel/traps.c

@@ -45,21 +45,21 @@ static int __init user_debug_setup(char *str)
 __setup("user_debug=", user_debug_setup);
 __setup("user_debug=", user_debug_setup);
 #endif
 #endif
 
 
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 {
 #ifdef CONFIG_KALLSYMS
 #ifdef CONFIG_KALLSYMS
-	printk("[<%08lx>] ", where);
-	print_symbol("(%s) ", where);
-	printk("from [<%08lx>] ", from);
-	print_symbol("(%s)\n", from);
+	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
+	sprint_symbol(sym1, where);
+	sprint_symbol(sym2, from);
+	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
 #else
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 #endif
 
 
 	if (in_exception_text(where))
 	if (in_exception_text(where))
-		dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 }
 
 
 #ifndef CONFIG_ARM_UNWIND
 #ifndef CONFIG_ARM_UNWIND
@@ -81,9 +81,10 @@ static int verify_stack(unsigned long sp)
 /*
 /*
  * Dump out the contents of some memory nicely...
  * Dump out the contents of some memory nicely...
  */
  */
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+		     unsigned long top)
 {
 {
-	unsigned long p = bottom & ~31;
+	unsigned long first;
 	mm_segment_t fs;
 	mm_segment_t fs;
 	int i;
 	int i;
 
 
@@ -95,33 +96,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
 	fs = get_fs();
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	set_fs(KERNEL_DS);
 
 
-	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
 
 
-	for (p = bottom & ~31; p < top;) {
-		printk("%04lx: ", p & 0xffff);
+	for (first = bottom & ~31; first < top; first += 32) {
+		unsigned long p;
+		char str[sizeof(" 12345678") * 8 + 1];
 
 
-		for (i = 0; i < 8; i++, p += 4) {
-			unsigned int val;
+		memset(str, ' ', sizeof(str));
+		str[sizeof(str) - 1] = '\0';
 
 
-			if (p < bottom || p >= top)
-				printk("         ");
-			else {
-				__get_user(val, (unsigned long *)p);
-				printk("%08x ", val);
+		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+			if (p >= bottom && p < top) {
+				unsigned long val;
+				if (__get_user(val, (unsigned long *)p) == 0)
+					sprintf(str + i * 9, " %08lx", val);
+				else
+					sprintf(str + i * 9, " ????????");
 			}
 			}
 		}
 		}
-		printk ("\n");
+		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
 	}
 	}
 
 
 	set_fs(fs);
 	set_fs(fs);
 }
 }
 
 
-static void dump_instr(struct pt_regs *regs)
+static void dump_instr(const char *lvl, struct pt_regs *regs)
 {
 {
 	unsigned long addr = instruction_pointer(regs);
 	unsigned long addr = instruction_pointer(regs);
 	const int thumb = thumb_mode(regs);
 	const int thumb = thumb_mode(regs);
 	const int width = thumb ? 4 : 8;
 	const int width = thumb ? 4 : 8;
 	mm_segment_t fs;
 	mm_segment_t fs;
+	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;
 	int i;
 
 
 	/*
 	/*
@@ -132,7 +137,6 @@ static void dump_instr(struct pt_regs *regs)
 	fs = get_fs();
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	set_fs(KERNEL_DS);
 
 
-	printk("Code: ");
 	for (i = -4; i < 1; i++) {
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;
 		unsigned int val, bad;
 
 
@@ -142,13 +146,14 @@ static void dump_instr(struct pt_regs *regs)
 			bad = __get_user(val, &((u32 *)addr)[i]);
 			bad = __get_user(val, &((u32 *)addr)[i]);
 
 
 		if (!bad)
 		if (!bad)
-			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
+			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+					width, val);
 		else {
 		else {
-			printk("bad PC value.");
+			p += sprintf(p, "bad PC value");
 			break;
 			break;
 		}
 		}
 	}
 	}
-	printk("\n");
+	printk("%sCode: %s\n", lvl, str);
 
 
 	set_fs(fs);
 	set_fs(fs);
 }
 }
@@ -224,18 +229,19 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 	struct task_struct *tsk = thread->task;
 	struct task_struct *tsk = thread->task;
 	static int die_counter;
 	static int die_counter;
 
 
-	printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
 	       str, err, ++die_counter);
 	       str, err, ++die_counter);
+	sysfs_printk_last_file();
 	print_modules();
 	print_modules();
 	__show_regs(regs);
 	__show_regs(regs);
-	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-		tsk->comm, task_pid_nr(tsk), thread + 1);
+	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
 
 
 	if (!user_mode(regs) || in_interrupt()) {
 	if (!user_mode(regs) || in_interrupt()) {
-		dump_mem("Stack: ", regs->ARM_sp,
+		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
 			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 		dump_backtrace(regs, tsk);
 		dump_backtrace(regs, tsk);
-		dump_instr(regs);
+		dump_instr(KERN_EMERG, regs);
 	}
 	}
 }
 }
 
 
@@ -250,13 +256,14 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 
 
 	oops_enter();
 	oops_enter();
 
 
-	console_verbose();
 	spin_lock_irq(&die_lock);
 	spin_lock_irq(&die_lock);
+	console_verbose();
 	bust_spinlocks(1);
 	bust_spinlocks(1);
 	__die(str, err, thread, regs);
 	__die(str, err, thread, regs);
 	bust_spinlocks(0);
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
 	spin_unlock_irq(&die_lock);
+	oops_exit();
 
 
 	if (in_interrupt())
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 		panic("Fatal exception in interrupt");
@@ -264,7 +271,6 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 	if (panic_on_oops)
 	if (panic_on_oops)
 		panic("Fatal exception");
 		panic("Fatal exception");
 
 
-	oops_exit();
 	do_exit(SIGSEGV);
 	do_exit(SIGSEGV);
 }
 }
 
 
@@ -349,7 +355,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (user_debug & UDBG_UNDEFINED) {
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
 			current->comm, task_pid_nr(current), pc);
-		dump_instr(regs);
+		dump_instr(KERN_INFO, regs);
 	}
 	}
 #endif
 #endif
 
 
@@ -400,7 +406,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	if (user_debug & UDBG_SYSCALL) {
 	if (user_debug & UDBG_SYSCALL) {
 		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
 		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
 			task_pid_nr(current), current->comm, n);
 			task_pid_nr(current), current->comm, n);
-		dump_instr(regs);
+		dump_instr(KERN_ERR, regs);
 	}
 	}
 #endif
 #endif
 
 
@@ -579,7 +585,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	if (user_debug & UDBG_SYSCALL) {
 	if (user_debug & UDBG_SYSCALL) {
 		printk("[%d] %s: arm syscall %d\n",
 		printk("[%d] %s: arm syscall %d\n",
 		       task_pid_nr(current), current->comm, no);
 		       task_pid_nr(current), current->comm, no);
-		dump_instr(regs);
+		dump_instr("", regs);
 		if (user_mode(regs)) {
 		if (user_mode(regs)) {
 			__show_regs(regs);
 			__show_regs(regs);
 			c_backtrace(regs->ARM_fp, processor_mode(regs));
 			c_backtrace(regs->ARM_fp, processor_mode(regs));
@@ -656,7 +662,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 	if (user_debug & UDBG_BADABORT) {
 	if (user_debug & UDBG_BADABORT) {
 		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
 		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
 			task_pid_nr(current), current->comm, code, instr);
 			task_pid_nr(current), current->comm, code, instr);
-		dump_instr(regs);
+		dump_instr(KERN_ERR, regs);
 		show_pte(current->mm, addr);
 		show_pte(current->mm, addr);
 	}
 	}
 #endif
 #endif

+ 2 - 2
arch/arm/mach-bcmring/core.c

@@ -271,12 +271,12 @@ static struct irqaction bcmring_timer_irq = {
 	.handler = bcmring_timer_interrupt,
 };
 
-static cycle_t bcmring_get_cycles_timer1(void)
+static cycle_t bcmring_get_cycles_timer1(struct clocksource *cs)
 {
 	return ~readl(TIMER1_VA_BASE + TIMER_VALUE);
 }
 
-static cycle_t bcmring_get_cycles_timer3(void)
+static cycle_t bcmring_get_cycles_timer3(struct clocksource *cs)
 {
 	return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
 }

+ 1 - 1
arch/arm/mach-bcmring/include/mach/system.h

@@ -29,7 +29,7 @@ static inline void arch_idle(void)
 	cpu_do_idle();
 }
 
-static inline void arch_reset(char mode, char *cmd)
+static inline void arch_reset(char mode, const char *cmd)
 {
 	printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
 

+ 35 - 9
arch/arm/mach-ep93xx/Kconfig

@@ -17,13 +17,31 @@ config EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	bool "0x00000000 - SDCE3/SyncBoot"
 	bool "0x00000000 - SDCE3/SyncBoot"
 	help
 	help
 	  Select this option if you want support for EP93xx boards with the
 	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0x00000000
+	  first SDRAM bank at 0x00000000.
 
 
 config EP93XX_SDCE0_PHYS_OFFSET
 config EP93XX_SDCE0_PHYS_OFFSET
 	bool "0xc0000000 - SDCEO"
 	bool "0xc0000000 - SDCEO"
 	help
 	help
 	  Select this option if you want support for EP93xx boards with the
 	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0xc0000000
+	  first SDRAM bank at 0xc0000000.
+
+config EP93XX_SDCE1_PHYS_OFFSET
+	bool "0xd0000000 - SDCE1"
+	help
+	  Select this option if you want support for EP93xx boards with the
+	  first SDRAM bank at 0xd0000000.
+
+config EP93XX_SDCE2_PHYS_OFFSET
+	bool "0xe0000000 - SDCE2"
+	help
+	  Select this option if you want support for EP93xx boards with the
+	  first SDRAM bank at 0xe0000000.
+
+config EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+	bool "0xf0000000 - SDCE3/AsyncBoot"
+	help
+	  Select this option if you want support for EP93xx boards with the
+	  first SDRAM bank at 0xf0000000.
 
 
 endchoice
 endchoice
 
 
@@ -112,28 +130,36 @@ config MACH_MICRO9
 	bool
 	bool
 
 
 config MACH_MICRO9H
 config MACH_MICRO9H
-	bool "Support Contec Hypercontrol Micro9-H"
+	bool "Support Contec Micro9-High"
 	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	select MACH_MICRO9
 	help
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  Say 'Y' here if you want your kernel to support the
-	  Contec Hypercontrol Micro9-H board.
+	  Contec Micro9-High board.
 
 
 config MACH_MICRO9M
 config MACH_MICRO9M
-	bool "Support Contec Hypercontrol Micro9-M"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
+	bool "Support Contec Micro9-Mid"
+	depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	select MACH_MICRO9
 	help
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  Say 'Y' here if you want your kernel to support the
-	  Contec Hypercontrol Micro9-M board.
+	  Contec Micro9-Mid board.
 
 
 config MACH_MICRO9L
 config MACH_MICRO9L
-	bool "Support Contec Hypercontrol Micro9-L"
+	bool "Support Contec Micro9-Lite"
 	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	select MACH_MICRO9
 	help
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  Say 'Y' here if you want your kernel to support the
-	  Contec Hypercontrol Micro9-L board.
+	  Contec Micro9-Lite board.
+
+config MACH_MICRO9S
+	bool "Support Contec Micro9-Slim"
+	depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+	select MACH_MICRO9
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  Contec Micro9-Slim board.
 
 
 config MACH_TS72XX
 config MACH_TS72XX
 	bool "Support Technologic Systems TS-72xx SBC"
 	bool "Support Technologic Systems TS-72xx SBC"

+ 9 - 0
arch/arm/mach-ep93xx/Makefile.boot

@@ -3,3 +3,12 @@ params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)	:= 0x00000100
 
    zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)		:= 0xc0008000
 params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)		:= 0xc0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)		:= 0xd0008000
+params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)		:= 0xd0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)		:= 0xe0008000
+params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)		:= 0xe0000100
+
+   zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)	:= 0xf0008000
+params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)	:= 0xf0000100

+ 119 - 47
arch/arm/mach-ep93xx/clock.c

@@ -16,13 +16,16 @@
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/string.h>
 #include <linux/io.h>
 #include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <mach/hardware.h>
 
 
 #include <asm/clkdev.h>
 #include <asm/clkdev.h>
 #include <asm/div64.h>
 #include <asm/div64.h>
-#include <mach/hardware.h>
 
 
 
 
 struct clk {
 struct clk {
+	struct clk	*parent;
 	unsigned long	rate;
 	unsigned long	rate;
 	int		users;
 	int		users;
 	int		sw_locked;
 	int		sw_locked;
@@ -39,40 +42,60 @@ static unsigned long get_uart_rate(struct clk *clk);
 static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
 static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
 static int set_div_rate(struct clk *clk, unsigned long rate);
 static int set_div_rate(struct clk *clk, unsigned long rate);
 
 
+
+static struct clk clk_xtali = {
+	.rate		= EP93XX_EXT_CLK_RATE,
+};
 static struct clk clk_uart1 = {
 static struct clk clk_uart1 = {
+	.parent		= &clk_xtali,
 	.sw_locked	= 1,
 	.sw_locked	= 1,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U1EN,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U1EN,
 	.get_rate	= get_uart_rate,
 	.get_rate	= get_uart_rate,
 };
 };
 static struct clk clk_uart2 = {
 static struct clk clk_uart2 = {
+	.parent		= &clk_xtali,
 	.sw_locked	= 1,
 	.sw_locked	= 1,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U2EN,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U2EN,
 	.get_rate	= get_uart_rate,
 	.get_rate	= get_uart_rate,
 };
 };
 static struct clk clk_uart3 = {
 static struct clk clk_uart3 = {
+	.parent		= &clk_xtali,
 	.sw_locked	= 1,
 	.sw_locked	= 1,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_reg	= EP93XX_SYSCON_DEVCFG,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U3EN,
 	.enable_mask	= EP93XX_SYSCON_DEVCFG_U3EN,
 	.get_rate	= get_uart_rate,
 	.get_rate	= get_uart_rate,
 };
 };
-static struct clk clk_pll1;
-static struct clk clk_f;
-static struct clk clk_h;
-static struct clk clk_p;
-static struct clk clk_pll2;
+static struct clk clk_pll1 = {
+	.parent		= &clk_xtali,
+};
+static struct clk clk_f = {
+	.parent		= &clk_pll1,
+};
+static struct clk clk_h = {
+	.parent		= &clk_pll1,
+};
+static struct clk clk_p = {
+	.parent		= &clk_pll1,
+};
+static struct clk clk_pll2 = {
+	.parent		= &clk_xtali,
+};
 static struct clk clk_usb_host = {
 static struct clk clk_usb_host = {
+	.parent		= &clk_pll2,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_USH_EN,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_USH_EN,
 };
 };
 static struct clk clk_keypad = {
 static struct clk clk_keypad = {
+	.parent		= &clk_xtali,
 	.sw_locked	= 1,
 	.sw_locked	= 1,
 	.enable_reg	= EP93XX_SYSCON_KEYTCHCLKDIV,
 	.enable_reg	= EP93XX_SYSCON_KEYTCHCLKDIV,
 	.enable_mask	= EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
 	.enable_mask	= EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
 	.set_rate	= set_keytchclk_rate,
 	.set_rate	= set_keytchclk_rate,
 };
 };
 static struct clk clk_pwm = {
 static struct clk clk_pwm = {
+	.parent		= &clk_xtali,
 	.rate		= EP93XX_EXT_CLK_RATE,
 	.rate		= EP93XX_EXT_CLK_RATE,
 };
 };
 
 
@@ -85,50 +108,62 @@ static struct clk clk_video = {
 
 
 /* DMA Clocks */
 /* DMA Clocks */
 static struct clk clk_m2p0 = {
 static struct clk clk_m2p0 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P0,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P0,
 };
 };
 static struct clk clk_m2p1 = {
 static struct clk clk_m2p1 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P1,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P1,
 };
 };
 static struct clk clk_m2p2 = {
 static struct clk clk_m2p2 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P2,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P2,
 };
 };
 static struct clk clk_m2p3 = {
 static struct clk clk_m2p3 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P3,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P3,
 };
 };
 static struct clk clk_m2p4 = {
 static struct clk clk_m2p4 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P4,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P4,
 };
 };
 static struct clk clk_m2p5 = {
 static struct clk clk_m2p5 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P5,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P5,
 };
 };
 static struct clk clk_m2p6 = {
 static struct clk clk_m2p6 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P6,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P6,
 };
 };
 static struct clk clk_m2p7 = {
 static struct clk clk_m2p7 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P7,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P7,
 };
 };
 static struct clk clk_m2p8 = {
 static struct clk clk_m2p8 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P8,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P8,
 };
 };
 static struct clk clk_m2p9 = {
 static struct clk clk_m2p9 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P9,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2P9,
 };
 };
 static struct clk clk_m2m0 = {
 static struct clk clk_m2m0 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2M0,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2M0,
 };
 };
 static struct clk clk_m2m1 = {
 static struct clk clk_m2m1 = {
+	.parent		= &clk_h,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_reg	= EP93XX_SYSCON_PWRCNT,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2M1,
 	.enable_mask	= EP93XX_SYSCON_PWRCNT_DMA_M2M1,
 };
 };
@@ -137,6 +172,7 @@ static struct clk clk_m2m1 = {
 	{ .dev_id = dev, .con_id = con, .clk = ck }
 	{ .dev_id = dev, .con_id = con, .clk = ck }
 
 
 static struct clk_lookup clocks[] = {
 static struct clk_lookup clocks[] = {
+	INIT_CK(NULL,			"xtali",	&clk_xtali),
 	INIT_CK("apb:uart1",		NULL,		&clk_uart1),
 	INIT_CK("apb:uart1",		NULL,		&clk_uart1),
 	INIT_CK("apb:uart2",		NULL,		&clk_uart2),
 	INIT_CK("apb:uart2",		NULL,		&clk_uart2),
 	INIT_CK("apb:uart3",		NULL,		&clk_uart3),
 	INIT_CK("apb:uart3",		NULL,		&clk_uart3),
@@ -163,48 +199,84 @@ static struct clk_lookup clocks[] = {
 	INIT_CK(NULL,			"m2m1",		&clk_m2m1),
 	INIT_CK(NULL,			"m2m1",		&clk_m2m1),
 };
 };
 
 
+static DEFINE_SPINLOCK(clk_lock);
+
+static void __clk_enable(struct clk *clk)
+{
+	if (!clk->users++) {
+		if (clk->parent)
+			__clk_enable(clk->parent);
+
+		if (clk->enable_reg) {
+			u32 v;
+
+			v = __raw_readl(clk->enable_reg);
+			v |= clk->enable_mask;
+			if (clk->sw_locked)
+				ep93xx_syscon_swlocked_write(v, clk->enable_reg);
+			else
+				__raw_writel(v, clk->enable_reg);
+		}
+	}
+}
 
 
 int clk_enable(struct clk *clk)
 int clk_enable(struct clk *clk)
 {
 {
-	if (!clk->users++ && clk->enable_reg) {
-		u32 value;
+	unsigned long flags;
 
 
-		value = __raw_readl(clk->enable_reg);
-		value |= clk->enable_mask;
-		if (clk->sw_locked)
-			ep93xx_syscon_swlocked_write(value, clk->enable_reg);
-		else
-			__raw_writel(value, clk->enable_reg);
-	}
+	if (!clk)
+		return -EINVAL;
+
+	spin_lock_irqsave(&clk_lock, flags);
+	__clk_enable(clk);
+	spin_unlock_irqrestore(&clk_lock, flags);
 
 
 	return 0;
 	return 0;
 }
 }
 EXPORT_SYMBOL(clk_enable);
 EXPORT_SYMBOL(clk_enable);
 
 
-void clk_disable(struct clk *clk)
+static void __clk_disable(struct clk *clk)
 {
 {
-	if (!--clk->users && clk->enable_reg) {
-		u32 value;
+	if (!--clk->users) {
+		if (clk->enable_reg) {
+			u32 v;
+
+			v = __raw_readl(clk->enable_reg);
+			v &= ~clk->enable_mask;
+			if (clk->sw_locked)
+				ep93xx_syscon_swlocked_write(v, clk->enable_reg);
+			else
+				__raw_writel(v, clk->enable_reg);
+		}
 
 
-		value = __raw_readl(clk->enable_reg);
-		value &= ~clk->enable_mask;
-		if (clk->sw_locked)
-			ep93xx_syscon_swlocked_write(value, clk->enable_reg);
-		else
-			__raw_writel(value, clk->enable_reg);
+		if (clk->parent)
+			__clk_disable(clk->parent);
 	}
 	}
 }
 }
+
+void clk_disable(struct clk *clk)
+{
+	unsigned long flags;
+
+	if (!clk)
+		return;
+
+	spin_lock_irqsave(&clk_lock, flags);
+	__clk_disable(clk);
+	spin_unlock_irqrestore(&clk_lock, flags);
+}
 EXPORT_SYMBOL(clk_disable);
 EXPORT_SYMBOL(clk_disable);
 
 
 static unsigned long get_uart_rate(struct clk *clk)
 static unsigned long get_uart_rate(struct clk *clk)
 {
 {
+	unsigned long rate = clk_get_rate(clk->parent);
 	u32 value;
 	u32 value;
 
 
 	value = __raw_readl(EP93XX_SYSCON_PWRCNT);
 	value = __raw_readl(EP93XX_SYSCON_PWRCNT);
 	if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
 	if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
-		return EP93XX_EXT_CLK_RATE;
+		return rate;
 	else
 	else
-		return EP93XX_EXT_CLK_RATE / 2;
+		return rate / 2;
 }
 }
 
 
 unsigned long clk_get_rate(struct clk *clk)
 unsigned long clk_get_rate(struct clk *clk)
@@ -244,16 +316,16 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
 	return 0;
 	return 0;
 }
 }
 
 
-static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
-				  int *pdiv, int *div)
+static int calc_clk_div(struct clk *clk, unsigned long rate,
+			int *psel, int *esel, int *pdiv, int *div)
 {
 {
-	unsigned long max_rate, best_rate = 0,
-		actual_rate = 0, mclk_rate = 0, rate_err = -1;
+	struct clk *mclk;
+	unsigned long max_rate, actual_rate, mclk_rate, rate_err = -1;
 	int i, found = 0, __div = 0, __pdiv = 0;
 	int i, found = 0, __div = 0, __pdiv = 0;
 
 
 	/* Don't exceed the maximum rate */
 	/* Don't exceed the maximum rate */
 	max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
 	max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
-		       (unsigned long)EP93XX_EXT_CLK_RATE / 4);
+		       clk_xtali.rate / 4);
 	rate = min(rate, max_rate);
 	rate = min(rate, max_rate);
 
 
 	/*
 	/*
@@ -267,11 +339,12 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
 	 */
 	 */
 	for (i = 0; i < 3; i++) {
 	for (i = 0; i < 3; i++) {
 		if (i == 0)
 		if (i == 0)
-			mclk_rate = EP93XX_EXT_CLK_RATE * 2;
+			mclk = &clk_xtali;
 		else if (i == 1)
 		else if (i == 1)
-			mclk_rate = clk_pll1.rate * 2;
-		else if (i == 2)
-			mclk_rate = clk_pll2.rate * 2;
+			mclk = &clk_pll1;
+		else
+			mclk = &clk_pll2;
+		mclk_rate = mclk->rate * 2;
 
 
 		/* Try each predivider value */
 		/* Try each predivider value */
 		for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
 		for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
@@ -286,7 +359,8 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
 				*div = __div;
 				*div = __div;
 				*psel = (i == 2);
 				*psel = (i == 2);
 				*esel = (i != 0);
 				*esel = (i != 0);
-				best_rate = actual_rate;
+				clk->parent = mclk;
+				clk->rate = actual_rate;
 				rate_err = abs(actual_rate - rate);
 				rate_err = abs(actual_rate - rate);
 				found = 1;
 				found = 1;
 			}
 			}
@@ -294,21 +368,19 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
 	}
 	}
 
 
 	if (!found)
 	if (!found)
-		return 0;
+		return -EINVAL;
 
 
-	return best_rate;
+	return 0;
 }
 }
 
 
 static int set_div_rate(struct clk *clk, unsigned long rate)
 static int set_div_rate(struct clk *clk, unsigned long rate)
 {
 {
-	unsigned long actual_rate;
-	int psel = 0, esel = 0, pdiv = 0, div = 0;
+	int err, psel = 0, esel = 0, pdiv = 0, div = 0;
 	u32 val;
 	u32 val;
 
 
-	actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div);
-	if (actual_rate == 0)
-		return -EINVAL;
-	clk->rate = actual_rate;
+	err = calc_clk_div(clk, rate, &psel, &esel, &pdiv, &div);
+	if (err)
+		return err;
 
 
 	/* Clear the esel, psel, pdiv and div bits */
 	/* Clear the esel, psel, pdiv and div bits */
 	val = __raw_readl(clk->enable_reg);
 	val = __raw_readl(clk->enable_reg);
@@ -344,7 +416,7 @@ static unsigned long calc_pll_rate(u32 config_word)
 	unsigned long long rate;
 	unsigned long long rate;
 	int i;
 	int i;
 
 
-	rate = EP93XX_EXT_CLK_RATE;
+	rate = clk_xtali.rate;
 	rate *= ((config_word >> 11) & 0x1f) + 1;		/* X1FBD */
 	rate *= ((config_word >> 11) & 0x1f) + 1;		/* X1FBD */
 	rate *= ((config_word >> 5) & 0x3f) + 1;		/* X2FBD */
 	rate *= ((config_word >> 5) & 0x3f) + 1;		/* X2FBD */
 	do_div(rate, (config_word & 0x1f) + 1);			/* X2IPD */
 	do_div(rate, (config_word & 0x1f) + 1);			/* X2IPD */
@@ -377,7 +449,7 @@ static int __init ep93xx_clock_init(void)
 
 
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
 	if (!(value & 0x00800000)) {			/* PLL1 bypassed?  */
 	if (!(value & 0x00800000)) {			/* PLL1 bypassed?  */
-		clk_pll1.rate = EP93XX_EXT_CLK_RATE;
+		clk_pll1.rate = clk_xtali.rate;
 	} else {
 	} else {
 		clk_pll1.rate = calc_pll_rate(value);
 		clk_pll1.rate = calc_pll_rate(value);
 	}
 	}
@@ -388,7 +460,7 @@ static int __init ep93xx_clock_init(void)
 
 
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
 	value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
 	if (!(value & 0x00080000)) {			/* PLL2 bypassed?  */
 	if (!(value & 0x00080000)) {			/* PLL2 bypassed?  */
-		clk_pll2.rate = EP93XX_EXT_CLK_RATE;
+		clk_pll2.rate = clk_xtali.rate;
 	} else if (value & 0x00040000) {		/* PLL2 enabled?  */
 	} else if (value & 0x00040000) {		/* PLL2 enabled?  */
 		clk_pll2.rate = calc_pll_rate(value);
 		clk_pll2.rate = calc_pll_rate(value);
 	} else {
 	} else {
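For context, consumers see this reworked clock code through the generic clk
API; a minimal, hypothetical driver-side sketch (device, connection id and
error handling simplified, not part of this commit):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/device.h>

    /* Sketch only: with the parent pointers added above, enabling a leaf
     * clock (e.g. clk_usb_host) now also enables its parents (clk_pll2,
     * clk_xtali), all under the new clk_lock spinlock. */
    static int example_clk_user(struct device *dev)
    {
    	struct clk *clk;
    	int err;

    	clk = clk_get(dev, NULL);	/* resolved via the clocks[] lookup table */
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	err = clk_enable(clk);		/* bumps the parent use counts too */
    	if (err) {
    		clk_put(clk);
    		return err;
    	}

    	dev_info(dev, "clock rate: %lu Hz\n", clk_get_rate(clk));

    	clk_disable(clk);		/* drops the parent references again */
    	clk_put(clk);
    	return 0;
    }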

+ 23 - 8
arch/arm/mach-ep93xx/core.c

@@ -550,13 +550,11 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
 	platform_device_register(&ep93xx_eth_device);
 	platform_device_register(&ep93xx_eth_device);
 }
 }
 
 
-static struct i2c_gpio_platform_data ep93xx_i2c_data = {
-	.sda_pin		= EP93XX_GPIO_LINE_EEDAT,
-	.sda_is_open_drain	= 0,
-	.scl_pin		= EP93XX_GPIO_LINE_EECLK,
-	.scl_is_open_drain	= 0,
-	.udelay			= 2,
-};
+
+/*************************************************************************
+ * EP93xx i2c peripheral handling
+ *************************************************************************/
+static struct i2c_gpio_platform_data ep93xx_i2c_data;
 
 
 static struct platform_device ep93xx_i2c_device = {
 static struct platform_device ep93xx_i2c_device = {
 	.name			= "i2c-gpio",
 	.name			= "i2c-gpio",
@@ -564,8 +562,25 @@ static struct platform_device ep93xx_i2c_device = {
 	.dev.platform_data	= &ep93xx_i2c_data,
 	.dev.platform_data	= &ep93xx_i2c_data,
 };
 };
 
 
-void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
+void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
+				struct i2c_board_info *devices, int num)
 {
 {
+	/*
+	 * Set the EEPROM interface pin drive type control.
+	 * Defines the driver type for the EECLK and EEDAT pins as either
+	 * open drain, which will require an external pull-up, or a normal
+	 * CMOS driver.
+	 */
+	if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT)
+		pr_warning("ep93xx: sda != EEDAT, open drain has no effect\n");
+	if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK)
+		pr_warning("ep93xx: scl != EECLK, open drain has no effect\n");
+
+	__raw_writel((data->sda_is_open_drain << 1) |
+		     (data->scl_is_open_drain << 0),
+		     EP93XX_GPIO_EEDRIVE);
+
+	ep93xx_i2c_data = *data;
 	i2c_register_board_info(0, devices, num);
 	i2c_register_board_info(0, devices, num);
 	platform_device_register(&ep93xx_i2c_device);
 	platform_device_register(&ep93xx_i2c_device);
 }
 }

+ 24 - 7
arch/arm/mach-ep93xx/edb93xx.c

@@ -27,8 +27,10 @@
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
-#include <linux/i2c.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/physmap.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
 
 
 #include <mach/hardware.h>
 #include <mach/hardware.h>
 
 
@@ -76,13 +78,26 @@ static struct ep93xx_eth_data edb93xx_eth_data = {
 	.phy_id		= 1,
 	.phy_id		= 1,
 };
 };
 
 
-static struct i2c_board_info __initdata edb93xxa_i2c_data[] = {
+
+/*************************************************************************
+ * EDB93xx i2c peripheral handling
+ *************************************************************************/
+static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = {
+	.sda_pin		= EP93XX_GPIO_LINE_EEDAT,
+	.sda_is_open_drain	= 0,
+	.scl_pin		= EP93XX_GPIO_LINE_EECLK,
+	.scl_is_open_drain	= 0,
+	.udelay			= 0,	/* default to 100 kHz */
+	.timeout		= 0,	/* default to 100 ms */
+};
+
+static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
 	{
 	{
 		I2C_BOARD_INFO("isl1208", 0x6f),
 		I2C_BOARD_INFO("isl1208", 0x6f),
 	},
 	},
 };
 };
 
 
-static struct i2c_board_info __initdata edb93xx_i2c_data[] = {
+static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
 	{
 	{
 		I2C_BOARD_INFO("ds1337", 0x68),
 		I2C_BOARD_INFO("ds1337", 0x68),
 	},
 	},
@@ -92,12 +107,14 @@ static void __init edb93xx_register_i2c(void)
 {
 {
 	if (machine_is_edb9302a() || machine_is_edb9307a() ||
 	if (machine_is_edb9302a() || machine_is_edb9307a() ||
 	    machine_is_edb9315a()) {
 	    machine_is_edb9315a()) {
-		ep93xx_register_i2c(edb93xxa_i2c_data,
-				ARRAY_SIZE(edb93xxa_i2c_data));
+		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
+				    edb93xxa_i2c_board_info,
+				    ARRAY_SIZE(edb93xxa_i2c_board_info));
 	} else if (machine_is_edb9307() || machine_is_edb9312() ||
 	} else if (machine_is_edb9307() || machine_is_edb9312() ||
 		   machine_is_edb9315()) {
 		   machine_is_edb9315()) {
-		ep93xx_register_i2c(edb93xx_i2c_data,
-				ARRAY_SIZE(edb93xx_i2c_data));
+		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
+				    edb93xx_i2c_board_info,
+				    ARRAY_SIZE(edb93xx_i2c_board_info));
 	}
 	}
 }
 }
 
 

+ 22 - 20
arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h

@@ -52,25 +52,27 @@
 #define EP93XX_AHB_VIRT_BASE		0xfef00000
 #define EP93XX_AHB_SIZE			0x00100000
 
+#define EP93XX_AHB_PHYS(x)		(EP93XX_AHB_PHYS_BASE + (x))
 #define EP93XX_AHB_IOMEM(x)		IOMEM(EP93XX_AHB_VIRT_BASE + (x))
 
 #define EP93XX_APB_PHYS_BASE		0x80800000
 #define EP93XX_APB_VIRT_BASE		0xfed00000
 #define EP93XX_APB_SIZE			0x00200000
 
+#define EP93XX_APB_PHYS(x)		(EP93XX_APB_PHYS_BASE + (x))
 #define EP93XX_APB_IOMEM(x)		IOMEM(EP93XX_APB_VIRT_BASE + (x))
 
 
 /* AHB peripherals */
 #define EP93XX_DMA_BASE			EP93XX_AHB_IOMEM(0x00000000)
 
-#define EP93XX_ETHERNET_PHYS_BASE	(EP93XX_AHB_PHYS_BASE + 0x00010000)
+#define EP93XX_ETHERNET_PHYS_BASE	EP93XX_AHB_PHYS(0x00010000)
 #define EP93XX_ETHERNET_BASE		EP93XX_AHB_IOMEM(0x00010000)
 
-#define EP93XX_USB_PHYS_BASE		(EP93XX_AHB_PHYS_BASE + 0x00020000)
+#define EP93XX_USB_PHYS_BASE		EP93XX_AHB_PHYS(0x00020000)
 #define EP93XX_USB_BASE			EP93XX_AHB_IOMEM(0x00020000)
 
-#define EP93XX_RASTER_PHYS_BASE		(EP93XX_AHB_PHYS_BASE + 0x00030000)
+#define EP93XX_RASTER_PHYS_BASE		EP93XX_AHB_PHYS(0x00030000)
 #define EP93XX_RASTER_BASE		EP93XX_AHB_IOMEM(0x00030000)
 
 #define EP93XX_GRAPHICS_ACCEL_BASE	EP93XX_AHB_IOMEM(0x00040000)
@@ -112,21 +114,10 @@
 
 #define EP93XX_GPIO_BASE		EP93XX_APB_IOMEM(0x00040000)
 #define EP93XX_GPIO_REG(x)		(EP93XX_GPIO_BASE + (x))
-#define EP93XX_GPIO_F_INT_TYPE1		EP93XX_GPIO_REG(0x4c)
-#define EP93XX_GPIO_F_INT_TYPE2		EP93XX_GPIO_REG(0x50)
-#define EP93XX_GPIO_F_INT_ACK		EP93XX_GPIO_REG(0x54)
-#define EP93XX_GPIO_F_INT_ENABLE	EP93XX_GPIO_REG(0x58)
 #define EP93XX_GPIO_F_INT_STATUS	EP93XX_GPIO_REG(0x5c)
-#define EP93XX_GPIO_A_INT_TYPE1		EP93XX_GPIO_REG(0x90)
-#define EP93XX_GPIO_A_INT_TYPE2		EP93XX_GPIO_REG(0x94)
-#define EP93XX_GPIO_A_INT_ACK		EP93XX_GPIO_REG(0x98)
-#define EP93XX_GPIO_A_INT_ENABLE	EP93XX_GPIO_REG(0x9c)
 #define EP93XX_GPIO_A_INT_STATUS	EP93XX_GPIO_REG(0xa0)
-#define EP93XX_GPIO_B_INT_TYPE1		EP93XX_GPIO_REG(0xac)
-#define EP93XX_GPIO_B_INT_TYPE2		EP93XX_GPIO_REG(0xb0)
-#define EP93XX_GPIO_B_INT_ACK		EP93XX_GPIO_REG(0xb4)
-#define EP93XX_GPIO_B_INT_ENABLE	EP93XX_GPIO_REG(0xb8)
 #define EP93XX_GPIO_B_INT_STATUS	EP93XX_GPIO_REG(0xbc)
+#define EP93XX_GPIO_EEDRIVE		EP93XX_GPIO_REG(0xc8)
 
 #define EP93XX_AAC_BASE			EP93XX_APB_IOMEM(0x00080000)
 
@@ -134,13 +125,13 @@
 
 #define EP93XX_IRDA_BASE		EP93XX_APB_IOMEM(0x000b0000)
 
-#define EP93XX_UART1_PHYS_BASE		(EP93XX_APB_PHYS_BASE + 0x000c0000)
+#define EP93XX_UART1_PHYS_BASE		EP93XX_APB_PHYS(0x000c0000)
 #define EP93XX_UART1_BASE		EP93XX_APB_IOMEM(0x000c0000)
 
-#define EP93XX_UART2_PHYS_BASE		(EP93XX_APB_PHYS_BASE + 0x000d0000)
+#define EP93XX_UART2_PHYS_BASE		EP93XX_APB_PHYS(0x000d0000)
 #define EP93XX_UART2_BASE		EP93XX_APB_IOMEM(0x000d0000)
 
-#define EP93XX_UART3_PHYS_BASE		(EP93XX_APB_PHYS_BASE + 0x000e0000)
+#define EP93XX_UART3_PHYS_BASE		EP93XX_APB_PHYS(0x000e0000)
 #define EP93XX_UART3_BASE		EP93XX_APB_IOMEM(0x000e0000)
 
 #define EP93XX_KEY_MATRIX_BASE		EP93XX_APB_IOMEM(0x000f0000)
@@ -148,10 +139,10 @@
 #define EP93XX_ADC_BASE			EP93XX_APB_IOMEM(0x00100000)
 #define EP93XX_TOUCHSCREEN_BASE		EP93XX_APB_IOMEM(0x00100000)
 
-#define EP93XX_PWM_PHYS_BASE		(EP93XX_APB_PHYS_BASE + 0x00110000)
+#define EP93XX_PWM_PHYS_BASE		EP93XX_APB_PHYS(0x00110000)
 #define EP93XX_PWM_BASE			EP93XX_APB_IOMEM(0x00110000)
 
-#define EP93XX_RTC_PHYS_BASE		(EP93XX_APB_PHYS_BASE + 0x00120000)
+#define EP93XX_RTC_PHYS_BASE		EP93XX_APB_PHYS(0x00120000)
 #define EP93XX_RTC_BASE			EP93XX_APB_IOMEM(0x00120000)
 
 #define EP93XX_SYSCON_BASE		EP93XX_APB_IOMEM(0x00130000)
@@ -218,6 +209,17 @@
 #define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV	(1<<16)
 #define EP93XX_SYSCON_KEYTCHCLKDIV_KEN	(1<<15)
 #define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV	(1<<0)
+#define EP93XX_SYSCON_SYSCFG		EP93XX_SYSCON_REG(0x9c)
+#define EP93XX_SYSCON_SYSCFG_REV_MASK	(0xf0000000)
+#define EP93XX_SYSCON_SYSCFG_REV_SHIFT	(28)
+#define EP93XX_SYSCON_SYSCFG_SBOOT	(1<<8)
+#define EP93XX_SYSCON_SYSCFG_LCSN7	(1<<7)
+#define EP93XX_SYSCON_SYSCFG_LCSN6	(1<<6)
+#define EP93XX_SYSCON_SYSCFG_LASDO	(1<<5)
+#define EP93XX_SYSCON_SYSCFG_LEEDA	(1<<4)
+#define EP93XX_SYSCON_SYSCFG_LEECLK	(1<<3)
+#define EP93XX_SYSCON_SYSCFG_LCSN2	(1<<1)
+#define EP93XX_SYSCON_SYSCFG_LCSN1	(1<<0)
 #define EP93XX_SYSCON_SWLOCK		EP93XX_SYSCON_REG(0xc0)
 
 #define EP93XX_WATCHDOG_BASE		EP93XX_APB_IOMEM(0x00140000)

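Editor's note: the EP93XX_AHB_PHYS()/EP93XX_APB_PHYS() helpers added above mirror the existing *_IOMEM() macros, so a peripheral's physical and statically mapped virtual addresses are both derived from the same offset. A minimal sketch of how a board file might use the pair follows; the resource name and the 4K size are assumptions, not part of the patch.

/* Illustrative only: physical address for a platform resource, virtual for early access */
static struct resource example_uart1_resource = {
	.start	= EP93XX_APB_PHYS(0x000c0000),			/* == EP93XX_UART1_PHYS_BASE */
	.end	= EP93XX_APB_PHYS(0x000c0000) + SZ_4K - 1,
	.flags	= IORESOURCE_MEM,
};

void __iomem *example_uart1_regs = EP93XX_APB_IOMEM(0x000c0000);	/* == EP93XX_UART1_BASE */
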
+ 4 - 12
arch/arm/mach-ep93xx/include/mach/gpio.h

@@ -114,17 +114,9 @@ extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
  *          B0..B7  (7..15) to irq 72..79, and
  *          F0..F7 (16..24) to irq 80..87.
  */
-static inline int gpio_to_irq(unsigned gpio)
-{
-	if (gpio <= EP93XX_GPIO_LINE_MAX_IRQ)
-		return 64 + gpio;
-
-	return -EINVAL;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	return irq - gpio_to_irq(0);
-}
+#define gpio_to_irq(gpio)	\
+	(((gpio) <= EP93XX_GPIO_LINE_MAX_IRQ) ? (64 + (gpio)) : -EINVAL)
+
+#define irq_to_gpio(irq)	((irq) - gpio_to_irq(0))
 
 #endif

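Editor's note: with the change above, gpio_to_irq()/irq_to_gpio() become simple macros over the fixed EP93xx mapping (lines 0..EP93XX_GPIO_LINE_MAX_IRQ map to IRQ 64 and up). A hedged usage sketch, with a hypothetical handler name:

/* Illustrative only: GPIO line 3 (port A) maps to IRQ 64 + 3 = 67 */
int irq = gpio_to_irq(3);

if (irq >= 0)
	err = request_irq(irq, example_gpio_handler, IRQF_TRIGGER_FALLING,
			  "example-gpio", NULL);	/* example_gpio_handler is hypothetical */
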
+ 6 - 0
arch/arm/mach-ep93xx/include/mach/memory.h

@@ -9,6 +9,12 @@
 #define PHYS_OFFSET		UL(0x00000000)
 #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
 #define PHYS_OFFSET		UL(0xc0000000)
+#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
+#define PHYS_OFFSET		UL(0xd0000000)
+#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
+#define PHYS_OFFSET		UL(0xe0000000)
+#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
+#define PHYS_OFFSET		UL(0xf0000000)
 #else
 #error "Kconfig bug: No EP93xx PHYS_OFFSET set"
 #endif

+ 3 - 1
arch/arm/mach-ep93xx/include/mach/platform.h

@@ -4,6 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
+struct i2c_gpio_platform_data;
 struct i2c_board_info;
 struct platform_device;
 struct ep93xxfb_mach_info;
@@ -33,7 +34,8 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
 }
 
 void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
-void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
+void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
+			 struct i2c_board_info *devices, int num);
 void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
 void ep93xx_register_pwm(int pwm0, int pwm1);
 int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);

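Editor's note: the new ep93xx_register_i2c() prototype above takes the i2c-gpio bus description in addition to the board info array. A minimal sketch of a caller under the new signature; the device at 0x6f and the EEDAT/EECLK pin choice are assumptions for illustration only.

/* Illustrative only: board code calling the new three-argument interface */
static struct i2c_gpio_platform_data example_i2c_gpio_data = {
	.sda_pin	= EP93XX_GPIO_LINE_EEDAT,
	.scl_pin	= EP93XX_GPIO_LINE_EECLK,
};

static struct i2c_board_info __initdata example_i2c_board_info[] = {
	{ I2C_BOARD_INFO("isl1208", 0x6f) },	/* hypothetical RTC on the bus */
};

static void __init example_init_machine(void)
{
	ep93xx_register_i2c(&example_i2c_gpio_data,
			    example_i2c_board_info,
			    ARRAY_SIZE(example_i2c_board_info));
}
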
+ 77 - 55
arch/arm/mach-ep93xx/micro9.c

@@ -2,7 +2,9 @@
  *  linux/arch/arm/mach-ep93xx/micro9.c
  *
  * Copyright (C) 2006 Contec Steuerungstechnik & Automation GmbH
- *                   Manfred Gruber <manfred.gruber@contec.at>
+ *                    Manfred Gruber <m.gruber@tirol.com>
+ * Copyright (C) 2009 Contec Steuerungstechnik & Automation GmbH
+ *                    Hubert Feurstein <hubert.feurstein@contec.at>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,104 +22,124 @@
 #include <asm/mach/arch.h>
 
 
-static struct ep93xx_eth_data micro9_eth_data = {
-	.phy_id		= 0x1f,
-};
-
-static void __init micro9_init(void)
-{
-	ep93xx_register_eth(&micro9_eth_data, 1);
-}
-
-/*
- * Micro9-H
- */
-#ifdef CONFIG_MACH_MICRO9H
-static struct physmap_flash_data micro9h_flash_data = {
-	.width		= 4,
-};
-
-static struct resource micro9h_flash_resource = {
+/*************************************************************************
+ * Micro9 NOR Flash
+ *
+ * Micro9-High has up to 64MB of 32-bit flash on CS1
+ * Micro9-Mid has up to 64MB of either 32-bit or 16-bit flash on CS1
+ * Micro9-Lite uses a seperate MTD map driver for flash support
+ * Micro9-Slim has up to 64MB of either 32-bit or 16-bit flash on CS1
+ *************************************************************************/
+static struct physmap_flash_data micro9_flash_data;
+
+static struct resource micro9_flash_resource = {
 	.start		= EP93XX_CS1_PHYS_BASE,
 	.end		= EP93XX_CS1_PHYS_BASE + SZ_64M - 1,
 	.flags		= IORESOURCE_MEM,
 };
 
-static struct platform_device micro9h_flash = {
+static struct platform_device micro9_flash = {
 	.name		= "physmap-flash",
 	.id		= 0,
 	.dev		= {
-		.platform_data	= &micro9h_flash_data,
+		.platform_data	= &micro9_flash_data,
 	},
 	.num_resources	= 1,
-	.resource	= &micro9h_flash_resource,
+	.resource	= &micro9_flash_resource,
 };
 
-static void __init micro9h_init(void)
+static void __init __micro9_register_flash(unsigned int width)
+{
+	micro9_flash_data.width = width;
+
+	platform_device_register(&micro9_flash);
+}
+
+static unsigned int __init micro9_detect_bootwidth(void)
+{
+	u32 v;
+
+	/* Detect the bus width of the external flash memory */
+	v = __raw_readl(EP93XX_SYSCON_SYSCFG);
+	if (v & EP93XX_SYSCON_SYSCFG_LCSN7)
+		return 4; /* 32-bit */
+	else
+		return 2; /* 16-bit */
+}
+
+static void __init micro9_register_flash(void)
 {
-	platform_device_register(&micro9h_flash);
+	if (machine_is_micro9())
+		__micro9_register_flash(4);
+	else if (machine_is_micro9m() || machine_is_micro9s())
+		__micro9_register_flash(micro9_detect_bootwidth());
 }
 
-static void __init micro9h_init_machine(void)
+
+/*************************************************************************
+ * Micro9 Ethernet
+ *************************************************************************/
+static struct ep93xx_eth_data micro9_eth_data = {
+	.phy_id		= 0x1f,
+};
+
+
+static void __init micro9_init_machine(void)
 {
 	ep93xx_init_devices();
-	micro9_init();
-	micro9h_init();
+	ep93xx_register_eth(&micro9_eth_data, 1);
+	micro9_register_flash();
 }
 
-MACHINE_START(MICRO9, "Contec Hypercontrol Micro9-H")
-	/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+
+#ifdef CONFIG_MACH_MICRO9H
+MACHINE_START(MICRO9, "Contec Micro9-High")
+	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
 	.phys_io	= EP93XX_APB_PHYS_BASE,
 	.io_pg_offst	= ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
 	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
-	.init_machine	= micro9h_init_machine,
+	.init_machine	= micro9_init_machine,
 MACHINE_END
 #endif
 
-/*
- * Micro9-M
- */
 #ifdef CONFIG_MACH_MICRO9M
-static void __init micro9m_init_machine(void)
-{
-	ep93xx_init_devices();
-	micro9_init();
-}
-
-MACHINE_START(MICRO9M, "Contec Hypercontrol Micro9-M")
-	/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+MACHINE_START(MICRO9M, "Contec Micro9-Mid")
+	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
 	.phys_io	= EP93XX_APB_PHYS_BASE,
 	.io_pg_offst	= ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
-	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
+	.boot_params	= EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
-	.init_machine	= micro9m_init_machine,
+	.init_machine	= micro9_init_machine,
 MACHINE_END
 #endif
 
-/*
- * Micro9-L
- */
 #ifdef CONFIG_MACH_MICRO9L
-static void __init micro9l_init_machine(void)
-{
-	ep93xx_init_devices();
-	micro9_init();
-}
-
-MACHINE_START(MICRO9L, "Contec Hypercontrol Micro9-L")
-	/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
+MACHINE_START(MICRO9L, "Contec Micro9-Lite")
+	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
 	.phys_io	= EP93XX_APB_PHYS_BASE,
 	.io_pg_offst	= ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
 	.boot_params	= EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
 	.map_io		= ep93xx_map_io,
 	.init_irq	= ep93xx_init_irq,
 	.timer		= &ep93xx_timer,
-	.init_machine	= micro9l_init_machine,
+	.init_machine	= micro9_init_machine,
 MACHINE_END
 #endif
 
+#ifdef CONFIG_MACH_MICRO9S
+MACHINE_START(MICRO9S, "Contec Micro9-Slim")
+	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
+	.phys_io	= EP93XX_APB_PHYS_BASE,
+	.io_pg_offst	= ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
+	.boot_params	= EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
+	.map_io		= ep93xx_map_io,
+	.init_irq	= ep93xx_init_irq,
+	.timer		= &ep93xx_timer,
+	.init_machine	= micro9_init_machine,
+MACHINE_END
+#endif

+ 1 - 1
arch/arm/mach-omap2/board-rx51-peripherals.c

@@ -444,7 +444,7 @@ static int __init rx51_i2c_init(void)
 		rx51_twldata.vaux3 = &rx51_vaux3_cam;
 		rx51_twldata.vmmc2 = &rx51_vmmc2;
 	}
-	omap_register_i2c_bus(1, 2600, rx51_peripherals_i2c_board_info_1,
+	omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
 			ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
 	omap_register_i2c_bus(2, 100, NULL, 0);
 	omap_register_i2c_bus(3, 400, NULL, 0);

+ 3 - 1
arch/arm/mach-omap2/board-zoom2.c

@@ -25,6 +25,7 @@
 #include <mach/keypad.h>
 
 #include "mmc-twl4030.h"
+#include "sdram-micron-mt46h32m32lf-6.h"
 
 /* Zoom2 has Qwerty keyboard*/
 static int board_keymap[] = {
@@ -213,7 +214,8 @@ static void __init omap_zoom2_init_irq(void)
 {
 	omap_board_config = zoom2_config;
 	omap_board_config_size = ARRAY_SIZE(zoom2_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+				 mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
 	omap_gpio_init();
 }

+ 1 - 0
arch/arm/mach-omap2/clock24xx.c

@@ -769,6 +769,7 @@ int __init omap2_clk_init(void)
 		if (c->cpu & cpu_mask) {
 			clkdev_add(&c->lk);
 			clk_register(c->lk.clk);
+			omap2_init_clk_clkdm(c->lk.clk);
 		}
 
 	/* Check the MPU rate set by bootloader */

+ 44 - 30
arch/arm/mach-omap2/clockdomain.c

@@ -137,6 +137,36 @@ static void _clkdm_del_autodeps(struct clockdomain *clkdm)
 	}
 }
 
+/*
+ * _omap2_clkdm_set_hwsup - set the hwsup idle transition bit
+ * @clkdm: struct clockdomain *
+ * @enable: int 0 to disable, 1 to enable
+ *
+ * Internal helper for actually switching the bit that controls hwsup
+ * idle transitions for clkdm.
+ */
+static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
+{
+	u32 v;
+
+	if (cpu_is_omap24xx()) {
+		if (enable)
+			v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
+		else
+			v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
+	} else if (cpu_is_omap34xx()) {
+		if (enable)
+			v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
+		else
+			v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO;
+	} else {
+		BUG();
+	}
+
+	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
+			    v << __ffs(clkdm->clktrctrl_mask),
+			    clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL);
+}
 
 static struct clockdomain *_clkdm_lookup(const char *name)
 {
@@ -456,8 +486,6 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm)
  */
 void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
 {
-	u32 v;
-
 	if (!clkdm)
 		return;
 
@@ -473,18 +501,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
 	if (atomic_read(&clkdm->usecount) > 0)
 		_clkdm_add_autodeps(clkdm);
 
-	if (cpu_is_omap24xx())
-		v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
-	else if (cpu_is_omap34xx())
-		v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
-	else
-		BUG();
-
-
-	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
-			    v << __ffs(clkdm->clktrctrl_mask),
-			    clkdm->pwrdm.ptr->prcm_offs,
-			    CM_CLKSTCTRL);
+	_omap2_clkdm_set_hwsup(clkdm, 1);
 
 	pwrdm_clkdm_state_switch(clkdm);
 }
@@ -500,8 +517,6 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
  */
 void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
 {
-	u32 v;
-
 	if (!clkdm)
 		return;
 
@@ -514,16 +529,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
-	if (cpu_is_omap24xx())
-		v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
-	else if (cpu_is_omap34xx())
-		v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO;
-	else
-		BUG();
-
-	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
-			    v << __ffs(clkdm->clktrctrl_mask),
-			    clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL);
+	_omap2_clkdm_set_hwsup(clkdm, 0);
 
 	if (atomic_read(&clkdm->usecount) > 0)
 		_clkdm_del_autodeps(clkdm);
@@ -569,10 +575,14 @@ int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 	v = omap2_clkdm_clktrctrl_read(clkdm);
 
 	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO))
+	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		/* Disable HW transitions when we are changing deps */
+		_omap2_clkdm_set_hwsup(clkdm, 0);
 		_clkdm_add_autodeps(clkdm);
-	else
+		_omap2_clkdm_set_hwsup(clkdm, 1);
+	} else {
 		omap2_clkdm_wakeup(clkdm);
+	}
 
 	pwrdm_wait_transition(clkdm->pwrdm.ptr);
 	pwrdm_clkdm_state_switch(clkdm);
@@ -623,10 +633,14 @@ int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	v = omap2_clkdm_clktrctrl_read(clkdm);
 
 	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO))
+	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		/* Disable HW transitions when we are changing deps */
+		_omap2_clkdm_set_hwsup(clkdm, 0);
 		_clkdm_del_autodeps(clkdm);
-	else
+		_omap2_clkdm_set_hwsup(clkdm, 1);
+	} else {
 		omap2_clkdm_sleep(clkdm);
+	}
 
 	pwrdm_clkdm_state_switch(clkdm);
 

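Editor's note: the two hunks above wrap the autodep updates in a disable/re-enable pair. The pattern, restated as a short illustrative sketch (not the exact code):

/* Illustrative only: edit wake/sleep dependencies with hwsup transitions off */
_omap2_clkdm_set_hwsup(clkdm, 0);	/* stop hardware-supervised transitions  */
_clkdm_add_autodeps(clkdm);		/* ...or _clkdm_del_autodeps() on disable */
_omap2_clkdm_set_hwsup(clkdm, 1);	/* re-enable automatic idle transitions   */
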
+ 1 - 1
arch/arm/mach-pxa/cpufreq-pxa2xx.c

@@ -155,7 +155,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
 
 static pxa_freqs_t pxa27x_freqs[] = {
 	{104000, 104000, PXA27x_CCCR(1,	 8, 2), 0, CCLKCFG2(1, 0, 1),  900000, 1705000 },
-	{156000, 104000, PXA27x_CCCR(1,	 8, 6), 0, CCLKCFG2(1, 1, 1), 1000000, 1705000 },
+	{156000, 104000, PXA27x_CCCR(1,	 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
 	{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
 	{312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
 	{416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },

+ 1 - 1
arch/arm/mach-pxa/csb726.c

@@ -238,7 +238,7 @@ static struct resource csb726_lan_resources[] = {
 };
 
 struct smsc911x_platform_config csb726_lan_config = {
-	.irq_type	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
 	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
 	.flags		= SMSC911X_USE_32BIT,
 	.phy_interface	= PHY_INTERFACE_MODE_MII,

+ 1 - 0
arch/arm/mach-sa1100/Makefile

@@ -25,6 +25,7 @@ led-$(CONFIG_SA1100_CERF)		+= leds-cerf.o
 
 obj-$(CONFIG_SA1100_COLLIE)		+= collie.o
 
+obj-$(CONFIG_SA1100_H3100)		+= h3600.o
 obj-$(CONFIG_SA1100_H3600)		+= h3600.o
 
 obj-$(CONFIG_SA1100_HACKKIT)		+= hackkit.o

+ 18 - 2
arch/arm/mm/cache-v6.S

@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
 *	- the Icache does not read data from the write buffer
 */
 ENTRY(v6_coherent_user_range)
-
+ UNWIND(.fnstart		)
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
+1:
+ USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
 	add	r0, r0, #CACHE_LINE_SIZE
+2:
 	cmp	r0, r1
 	blo	1b
 #endif
@@ -142,6 +145,19 @@ ENTRY(v6_coherent_user_range)
 #endif
 	mov	pc, lr
 
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
+ENDPROC(v6_coherent_user_range)
+ENDPROC(v6_coherent_kern_range)
+
 /*
 *	v6_flush_kern_dcache_page(kaddr)
 *

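Editor's note: the 9001: fixup above rounds the faulting address down to its 4K page and resumes the loop at the next page. The same logic in C, as an illustrative sketch only (not part of the patch):

/* Illustrative only: C equivalent of the assembly fixup path */
static unsigned long skip_to_next_page(unsigned long addr)
{
	addr = (addr >> 12) << 12;	/* clear the low 12 bits: page base */
	return addr + 4096;		/* continue cleaning from the next page */
}
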
+ 17 - 2
arch/arm/mm/cache-v7.S

@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range)
 *	- the Icache does not read data from the write buffer
 */
 ENTRY(v7_coherent_user_range)
+ UNWIND(.fnstart		)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
-1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+1:
+ USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
 	dsb
-	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+ USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
 	add	r0, r0, r2
+2:
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
@@ -167,6 +171,17 @@ ENTRY(v7_coherent_user_range)
 	dsb
 	isb
 	mov	pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 

+ 3 - 6
arch/arm/mm/fault-armv.c

@@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 
 	page = pfn_to_page(pfn);
 	mapping = page_mapping(page);
-	if (mapping) {
 #ifndef CONFIG_SMP
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
-		if (dirty)
-			__flush_dcache_page(mapping, page);
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(mapping, page);
 #endif
-
+	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, pfn);
 		else if (vma->vm_flags & VM_EXEC)

+ 5 - 0
arch/arm/mm/fault.c

@@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		 * down_read()
 		 */
 		might_sleep();
+#ifdef CONFIG_DEBUG_VM
+		if (!user_mode(regs) &&
+		    !search_exception_tables(regs->ARM_pc))
+			goto no_context;
+#endif
 	}
 
 	fault = __do_page_fault(mm, addr, fsr, tsk);

+ 2 - 0
arch/arm/mm/highmem.c

@@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	debug_kmap_atomic(type);
+
 	kmap = kmap_high_get(page);
 	if (kmap)
 		return kmap;

+ 1 - 1
arch/arm/mm/init.c

@@ -483,7 +483,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
 	 */
-	start_pg = pfn_to_page(start_pfn);
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
 	end_pg = pfn_to_page(end_pfn);
 
 	/*

+ 9 - 6
arch/arm/plat-omap/dma.c

@@ -829,10 +829,10 @@ EXPORT_SYMBOL(omap_free_dma);
 *
 * @param arb_rate
 * @param max_fifo_depth
- * @param tparams - Number of thereads to reserve : DMA_THREAD_RESERVE_NORM
- * 						    DMA_THREAD_RESERVE_ONET
- * 						    DMA_THREAD_RESERVE_TWOT
- * 						    DMA_THREAD_RESERVE_THREET
+ * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
+ * 						   DMA_THREAD_RESERVE_ONET
+ * 						   DMA_THREAD_RESERVE_TWOT
+ * 						   DMA_THREAD_RESERVE_THREET
 */
 void
 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
@@ -844,11 +844,14 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
 		return;
 	}
 
+	if (max_fifo_depth == 0)
+		max_fifo_depth = 1;
 	if (arb_rate == 0)
 		arb_rate = 1;
 
-	reg = (arb_rate & 0xff) << 16;
-	reg |= (0xff & max_fifo_depth);
+	reg = 0xff & max_fifo_depth;
+	reg |= (0x3 & tparams) << 12;
+	reg |= (arb_rate & 0xff) << 16;
 
 	dma_write(reg, GCR);
 }

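Editor's note: after the fix above, omap_dma_set_global_params() packs all three parameters into GCR instead of dropping tparams. A worked example with made-up values follows; the bit positions are taken from the shifts in the hunk, not from an authoritative register map, and the 0..3 encoding of DMA_THREAD_RESERVE_TWOT is an assumption.

/* Illustrative only: arb_rate = 4, max_fifo_depth = 64, tparams = 2 (e.g. DMA_THREAD_RESERVE_TWOT) */
u32 reg;

reg  = 0xff & 64;		/* bits  7:0  -> 0x00000040 */
reg |= (0x3 & 2) << 12;		/* bits 13:12 -> 0x00002000 */
reg |= (4 & 0xff) << 16;	/* bits 23:16 -> 0x00040000 */
/* reg == 0x00042040 is what gets written to GCR */
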
+ 1 - 1
arch/arm/plat-omap/mcbsp.c

@@ -595,7 +595,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 	rx &= 1;
 	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
 		w = OMAP_MCBSP_READ(io_base, RCCR);
-		w |= (tx ? RDISABLE : 0);
+		w |= (rx ? RDISABLE : 0);
 		OMAP_MCBSP_WRITE(io_base, RCCR, w);
 	}
 	w = OMAP_MCBSP_READ(io_base, SPCR1);

+ 5 - 5
arch/powerpc/include/asm/firmware.h

@@ -37,7 +37,7 @@
 #define FW_FEATURE_VIO		ASM_CONST(0x0000000000004000)
 #define FW_FEATURE_RDMA		ASM_CONST(0x0000000000008000)
 #define FW_FEATURE_LLAN		ASM_CONST(0x0000000000010000)
-#define FW_FEATURE_BULK		ASM_CONST(0x0000000000020000)
+#define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000000020000)
 #define FW_FEATURE_XDABR	ASM_CONST(0x0000000000040000)
 #define FW_FEATURE_MULTITCE	ASM_CONST(0x0000000000080000)
 #define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
@@ -45,8 +45,7 @@
 #define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
-#define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000002000000)
-#define FW_FEATURE_CMO		ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
 
 #ifndef __ASSEMBLY__
 
@@ -58,8 +57,9 @@ enum {
 		FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
 		FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
-		FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
-		FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_CMO,
+		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
+		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+		FW_FEATURE_CMO,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,

+ 2 - 0
arch/powerpc/kernel/cputable.c

@@ -711,6 +711,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_750,
 		.machine_check		= machine_check_generic,
 		.platform		= "ppc750",
+		.oprofile_cpu_type      = "ppc/750",
+		.oprofile_type		= PPC_OPROFILE_G4,
 	},
 	{	/* 745/755 */
 		.pvr_mask		= 0xfffff000,

+ 1 - 2
arch/powerpc/kernel/entry_64.S

@@ -1038,8 +1038,7 @@ _GLOBAL(mod_return_to_handler)
 	 * We are in a module using the module's TOC.
 	 * Switch to our TOC to run inside the core kernel.
 	 */
-	LOAD_REG_IMMEDIATE(r4,ftrace_return_to_handler)
-	ld	r2, 8(r4)
+	ld	r2, PACATOC(r13)
 
 	bl	.ftrace_return_to_handler
 	nop

+ 0 - 6
arch/powerpc/kernel/kgdb.c

@@ -282,12 +282,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 	unsigned long *ptr = gdb_regs;
 	int reg;
-#ifdef CONFIG_SPE
-	union {
-		u32 v32[2];
-		u64 v64;
-	} acc;
-#endif
 
 	for (reg = 0; reg < 32; reg++)
 		UNPACK64(regs->gpr[reg], ptr);

+ 1 - 1
arch/powerpc/kernel/pci-common.c

@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
-static int __init reparent_resources(struct resource *parent,
+static int reparent_resources(struct resource *parent,
 				     struct resource *res)
 {
 	struct resource *p, **pp;

+ 7 - 3
arch/powerpc/kernel/process.c

@@ -1016,9 +1016,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int curr_frame = current->curr_ret_stack;
 	extern void return_to_handler(void);
-	unsigned long addr = (unsigned long)return_to_handler;
+	unsigned long rth = (unsigned long)return_to_handler;
+	unsigned long mrth = -1;
 #ifdef CONFIG_PPC64
-	addr = *(unsigned long*)addr;
+	extern void mod_return_to_handler(void);
+	rth = *(unsigned long *)rth;
+	mrth = (unsigned long)mod_return_to_handler;
+	mrth = *(unsigned long *)mrth;
 #endif
 #endif
 
@@ -1044,7 +1048,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 		if (!firstframe || ip != lr) {
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-			if (ip == addr && curr_frame >= 0) {
+			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
 				printk(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;

+ 1 - 0
arch/powerpc/kernel/vmlinux.lds.S

@@ -236,6 +236,7 @@ SECTIONS
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 	}
 
+	. = ALIGN(PAGE_SIZE);
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 		NOSAVE_DATA
 	}

+ 4 - 6
arch/powerpc/mm/slb_low.S

@@ -72,19 +72,17 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 1:
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
-	 * will be patched by the kernel at boot
+	/* vmalloc mapping gets the encoding from the PACA as the mapping
+	 * can be demoted from 64K -> 4K dynamically on some machines
 	 */
-BEGIN_FTR_SECTION
-	/* check whether this is in vmalloc or ioremap space */
 	clrldi	r11,r10,48
 	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
 	bgt	5f
 	lhz	r11,PACAVMALLOCSLLP(r13)
 	b	6f
 5:
-END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
-_GLOBAL(slb_miss_kernel_load_io)
+	/* IO mapping */
+	_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
 BEGIN_FTR_SECTION

+ 1 - 1
arch/powerpc/platforms/cell/axon_msi.c

@@ -365,7 +365,7 @@ static int axon_msi_probe(struct of_device *device,
 		printk(KERN_ERR
 		       "axon_msi: couldn't parse dcr properties on %s\n",
 			dn->full_name);
-		goto out;
+		goto out_free_msic;
 	}
 
 	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);

+ 5 - 2
arch/powerpc/platforms/powermac/low_i2c.c

@@ -540,8 +540,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
 	/* Make sure IRQ is disabled */
 	kw_write_reg(reg_ier, 0);
 
-	/* Request chip interrupt */
-	if (request_irq(host->irq, kw_i2c_irq, 0, "keywest i2c", host))
+	/* Request chip interrupt. We set IRQF_TIMER because we don't
+	 * want that interrupt disabled between the 2 passes of driver
+	 * suspend or we'll have issues running the pfuncs
+	 */
+	if (request_irq(host->irq, kw_i2c_irq, IRQF_TIMER, "keywest i2c", host))
 		host->irq = NO_IRQ;
 
 	printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",

+ 1 - 2
arch/powerpc/platforms/pseries/firmware.c

@@ -51,11 +51,10 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
 	{FW_FEATURE_VIO,		"hcall-vio"},
 	{FW_FEATURE_RDMA,		"hcall-rdma"},
 	{FW_FEATURE_LLAN,		"hcall-lLAN"},
-	{FW_FEATURE_BULK,		"hcall-bulk"},
+	{FW_FEATURE_BULK_REMOVE,	"hcall-bulk"},
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
-	{FW_FEATURE_BULK_REMOVE,	"hcall-bulk"},
 };
 
 /* Build up the firmware features bitmask using the contents of

+ 1 - 1
arch/s390/hypfs/hypfs_diag.c

@@ -438,7 +438,7 @@ static int diag204_probe(void)
 		}
 		if (diag204((unsigned long)SUBC_STIB6 |
 			    (unsigned long)INFO_EXT, pages, buf) >= 0) {
-			diag204_store_sc = SUBC_STIB7;
+			diag204_store_sc = SUBC_STIB6;
 			diag204_info_type = INFO_EXT;
 			goto out;
 		}

+ 3 - 3
arch/s390/kernel/processor.c

@@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-	static const char *hwcap_str[9] = {
+	static const char *hwcap_str[10] = {
 		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-		"edat", "etf3eh"
+		"edat", "etf3eh", "highgprs"
 	};
 	struct _lowcore *lc;
 	unsigned long n = (unsigned long) v - 1;
@@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
 			   (loops_per_jiffy/(5000/HZ))%100);
 		seq_puts(m, "features\t: ");
-		for (i = 0; i < 9; i++)
+		for (i = 0; i < 10; i++)
 			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
 				seq_printf(m, "%s ", hwcap_str[i]);
 		seq_puts(m, "\n");

+ 1 - 1
arch/sh/kernel/entry-common.S

@@ -121,7 +121,7 @@ noresched:
 ENTRY(resume_userspace)
 	! r8: current_thread_info
 	cli
-	TRACE_IRQS_OfF
+	TRACE_IRQS_OFF
 	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
 	tst	#(_TIF_WORK_MASK & 0xff), r0
 	bt/s	__restore_all

+ 27 - 10
arch/sh/kernel/ftrace.c

@@ -291,31 +291,48 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
 	return syscalls_metadata[nr];
 }
 
-void arch_init_ftrace_syscalls(void)
+int syscall_name_to_nr(char *name)
+{
+	int i;
+
+	if (!syscalls_metadata)
+		return -1;
+	for (i = 0; i < NR_syscalls; i++)
+		if (syscalls_metadata[i])
+			if (!strcmp(syscalls_metadata[i]->name, name))
+				return i;
+	return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+	syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+	syscalls_metadata[num]->exit_id = id;
+}
+
+static int __init arch_init_ftrace_syscalls(void)
 {
 	int i;
 	struct syscall_metadata *meta;
 	unsigned long **psys_syscall_table = &sys_call_table;
-	static atomic_t refs;
-
-	if (atomic_inc_return(&refs) != 1)
-		goto end;
 
 	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
 					FTRACE_SYSCALL_MAX, GFP_KERNEL);
 	if (!syscalls_metadata) {
 		WARN_ON(1);
-		return;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
 		meta = find_syscall_meta(psys_syscall_table[i]);
 		syscalls_metadata[i] = meta;
 	}
-	return;
 
-	/* Paranoid: avoid overflow */
-end:
-	atomic_dec(&refs);
+	return 0;
 }
+arch_initcall(arch_init_ftrace_syscalls);
 #endif /* CONFIG_FTRACE_SYSCALLS */

+ 2 - 0
arch/sh/kernel/setup.c

@@ -549,6 +549,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 	if (cpu == 0)
 		seq_printf(m, "machine\t\t: %s\n", get_system_type());
+	else
+		seq_printf(m, "\n");
 
 	seq_printf(m, "processor\t: %d\n", cpu);
 	seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);

+ 4 - 5
arch/sh/kernel/signal_32.c

@@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
 {
 	struct task_struct *tsk = current;
 
-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
 		return 0;
 
 	set_used_math();
@@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
 {
 	struct task_struct *tsk = current;
 
-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
 		return 0;
 
 	if (!used_math()) {
@@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 #undef COPY
 
 #ifdef CONFIG_SH_FPU
-	if (current_cpu_data.flags & CPU_HAS_FPU) {
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
 		int owned_fp;
 		struct task_struct *tsk = current;
 
@@ -472,6 +472,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
 		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
 		regs->pr = (unsigned long) frame->retcode;
+		flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
 	}
 
 	if (err)
@@ -497,8 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
 		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
-	flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
-
 	return 0;
 
 give_sigsegv:

+ 2 - 0
arch/sh/kernel/smp.c

@@ -35,6 +35,8 @@ static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
 
+	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
+
 	c->loops_per_jiffy = loops_per_jiffy;
 }
 

+ 4 - 3
arch/sh/kernel/traps_32.c

@@ -25,6 +25,7 @@
 #include <linux/kexec.h>
 #include <linux/limits.h>
 #include <linux/proc_fs.h>
+#include <linux/sysfs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
@@ -159,12 +160,12 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	oops_enter();
 
-	console_verbose();
 	spin_lock_irq(&die_lock);
+	console_verbose();
 	bust_spinlocks(1);
 
 	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
+	sysfs_printk_last_file();
 	print_modules();
 	show_regs(regs);
 
@@ -180,6 +181,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
+	oops_exit();
 
 	if (kexec_should_crash(current))
 		crash_kexec(regs);
@@ -190,7 +192,6 @@ void die(const char * str, struct pt_regs * regs, long err)
 	if (panic_on_oops)
 		panic("Fatal exception");
 
-	oops_exit();
 	do_exit(SIGSEGV);
 }
 

+ 1 - 1
arch/sh/mm/cache.c

@@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
 		return;
 
 	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
+	if (pfn_valid(pfn)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 		if (dirty) {
 			unsigned long addr = (unsigned long)page_address(page);

+ 2 - 2
arch/sparc/kernel/ldc.c

@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
 	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
 	err = request_irq(lp->cfg.rx_irq, ldc_rx,
-			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
 			  lp->rx_irq_name, lp);
 	if (err)
 		return err;
 
 	err = request_irq(lp->cfg.tx_irq, ldc_tx,
-			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
 			  lp->tx_irq_name, lp);
 	if (err) {
 		free_irq(lp->cfg.rx_irq, lp);

+ 1 - 1
arch/sparc/kernel/perf_event.c

@@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = {
 	.lower_shift	= 6,
 	.event_mask	= 0xfff,
 	.hv_bit		= 0x8,
-	.irq_bit	= 0x03,
+	.irq_bit	= 0x30,
 	.upper_nop	= 0x220,
 	.lower_nop	= 0x220,
 };

+ 1 - 1
arch/sparc/mm/init_64.c

@@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn)
 	struct page *page;
 
 	page = pfn_to_page(pfn);
-	if (page && page_mapping(page)) {
+	if (page) {
 		unsigned long pg_flags;
 
 		pg_flags = page->flags;

+ 10 - 1
arch/x86/Kconfig

@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-	bool "VMI Guest support"
+	bool "VMI Guest support (DEPRECATED)"
 	select PARAVIRT
 	depends on X86_32
 	---help---
@@ -500,6 +500,15 @@ config VMI
 	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
+	  As of September 2009, VMware has started a phased retirement
+	  of this feature from VMware's products. Please see
+	  feature-removal-schedule.txt for details.  If you are
+	  planning to enable this option, please note that you cannot
+	  live migrate a VMI enabled VM to a future VMware product,
+	  which doesn't support VMI. So if you expect your kernel to
+	  seamlessly migrate to newer VMware products, keep this
+	  disabled.
+
 config KVM_CLOCK
 	bool "KVM paravirtualized clock"
 	select PARAVIRT

+ 3 - 0
arch/x86/boot/setup.ld

@@ -53,6 +53,9 @@ SECTIONS
 
 	/DISCARD/ : { *(.note*) }
 
+	/*
+	 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+	 */
 	. = ASSERT(_end <= 0x8000, "Setup too big!");
 	. = ASSERT(hdr == 0x1f1, "The setup header has the wrong offset!");
 	/* Necessary for the very-old-loader check to work... */

+ 2 - 1
arch/x86/include/asm/mce.h

@@ -108,6 +108,8 @@ struct mce_log {
 #define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
 #define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #ifdef __KERNEL__
 
 #include <linux/percpu.h>
@@ -213,6 +215,5 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 void intel_init_thermal(struct cpuinfo_x86 *c);
 
 void mce_log_therm_throt_event(__u64 status);
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */

+ 4 - 24
arch/x86/include/asm/paravirt.h

@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)

+ 6 - 4
arch/x86/include/asm/paravirt_types.h

@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS					\
 	unsigned long __edi = __edi, __esi = __esi,	\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 				"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		       VEXTRA_CLOBBERS,					\
 		       pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
 		      PVOP_VCALLEE_CLOBBERS, ,				\
 		      pre, post, ##__VA_ARGS__)
 

+ 3 - 0
arch/x86/kernel/acpi/realmode/wakeup.lds.S

@@ -57,5 +57,8 @@ SECTIONS
 		*(.note*)
 	}
 
+	/*
+	 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+	 */
 	. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
 }

+ 4 - 4
arch/x86/kernel/apic/x2apic_uv_x.c

@@ -352,14 +352,14 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
 		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
-		if (alias.s.base == 0) {
+		if (alias.s.enable && alias.s.base == 0) {
 			*size = (1UL << alias.s.m_alias);
 			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
 			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
 			return;
 		}
 	}
-	BUG();
+	*base = *size = 0;
 }
 
 enum map_type {map_wb, map_uc};
@@ -619,12 +619,12 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
 		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
 		uv_cpu_hub_info(cpu)->m_val = m_val;
-		uv_cpu_hub_info(cpu)->n_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = n_val;
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
 		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
-		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;

+ 22 - 10
arch/x86/kernel/cpu/mcheck/mce.c

@@ -88,18 +88,26 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int			cpu_missing;

-static void default_decode_mce(struct mce *m)
+/*
+ * CPU/chipset specific EDAC code can register a notifier call here to print
+ * MCE errors in a human-readable form.
+ */
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
+
+static int default_decode_mce(struct notifier_block *nb, unsigned long val,
+			       void *data)
 {
 	pr_emerg("No human readable MCE decoding support on this CPU type.\n");
 	pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
+
+	return NOTIFY_STOP;
 }

-/*
- * CPU/chipset specific EDAC code can register a callback here to print
- * MCE errors in a human-readable form:
- */
-void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
-EXPORT_SYMBOL(x86_mce_decode_callback);
+static struct notifier_block mce_dec_nb = {
+	.notifier_call = default_decode_mce,
+	.priority      = -1,
+};

 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
@@ -210,9 +218,9 @@ static void print_mce(struct mce *m)

 	/*
 	 * Print out human-readable details about the MCE error,
-	 * (if the CPU has an implementation for that):
+	 * (if the CPU has an implementation for that)
 	 */
-	x86_mce_decode_callback(m);
+	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
 }

 static void print_mce_head(void)
@@ -1220,7 +1228,8 @@ static int __cpuinit mce_cap_init(void)
 	rdmsrl(MSR_IA32_MCG_CAP, cap);

 	b = cap & MCG_BANKCNT_MASK;
-	printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
+	if (!banks)
+		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

 	if (b > MAX_NR_BANKS) {
 		printk(KERN_WARNING
@@ -1426,6 +1435,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 	mce_cpu_features(c);
 	mce_init_timer();
 	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
+
+	if (raw_smp_processor_id() == 0)
+		atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
 }

 /*
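The hunks above replace the single x86_mce_decode_callback pointer with an atomic notifier chain; the "run mcelog --ascii" fallback is registered at priority -1 so it only fires when no other decoder claims the event. A minimal sketch of how an external decoder would hook the new chain (my_decode_mce and my_mce_nb are illustrative names, not part of this patch; the real in-tree user is the edac_mce_amd.c conversion further down):

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

/* Hypothetical decoder: prints the MCE and suppresses the generic fallback. */
static int my_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = data;

	pr_info("decoded MCE: bank %d, status 0x%llx\n",
		m->bank, (unsigned long long)m->status);

	/* NOTIFY_STOP keeps lower-priority entries (the priority -1 fallback)
	 * from also printing. */
	return NOTIFY_STOP;
}

static struct notifier_block my_mce_nb = {
	.notifier_call = my_decode_mce,	/* default priority 0 */
};

static int __init my_decoder_init(void)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, &my_mce_nb);
	return 0;
}

static void __exit my_decoder_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &my_mce_nb);
}

module_init(my_decoder_init);
module_exit(my_decoder_exit);
MODULE_LICENSE("GPL");

Since notifiers run in descending priority order, a decoder registered at the default priority 0 is called before the -1 fallback, and its NOTIFY_STOP return is what suppresses the generic "No human readable MCE decoding support" message.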

+ 2 - 4
arch/x86/kernel/irq.c

@@ -63,10 +63,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
 	seq_printf(p, "  Spurious interrupts\n");
-	seq_printf(p, "%*s: ", prec, "CNT");
+	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
-	seq_printf(p, "  Performance counter interrupts\n");
+	seq_printf(p, "  Performance monitoring interrupts\n");
 	seq_printf(p, "%*s: ", prec, "PND");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 				__func__, smp_processor_id(), vector, irq);
 	}

-	run_local_timers();
 	irq_exit();

 	set_irq_regs(old_regs);
@@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
 	if (generic_interrupt_extension)
 		generic_interrupt_extension();

-	run_local_timers();
 	irq_exit();

 	set_irq_regs(old_regs);

+ 1 - 1
arch/x86/kernel/pci-dma.c

@@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
 	amd_iommu_shutdown();
 }
 /* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);

 #ifdef CONFIG_PCI
 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
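For context on the fs_initcall to rootfs_initcall move: initcall levels run in the order pure, core, postcore, arch, subsys, fs, rootfs, device, late, so the change defers pci_iommu_init by one level - after every fs-level initcall, but still ahead of ordinary device-driver initcalls - which is what the "must execute after PCI subsystem" comment requires.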

+ 0 - 1
arch/x86/kernel/smp.c

@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
-	run_local_timers();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */

+ 2 - 1
arch/x86/kernel/time.c

@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
 		return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-		unsigned long *sp = (unsigned long *)regs->sp;
+		unsigned long *sp =
+			(unsigned long *)kernel_stack_pointer(regs);
 		/*
 		 * Return address is either directly at stack pointer
 		 * or above a saved flags. Eflags has bits 22-31 zero,

+ 2 - 2
arch/x86/kernel/tlb_uv.c

@@ -843,8 +843,8 @@ static int __init uv_bau_init(void)
 				       GFP_KERNEL, cpu_to_node(cur_cpu));

 	uv_bau_retry_limit = 1;
-	uv_nshift = uv_hub_info->n_val;
-	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
+	uv_nshift = uv_hub_info->m_val;
+	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
 	nblades = uv_num_possible_blades();

 	uv_bau_table_bases = (struct bau_control **)

+ 10 - 2
arch/x86/kernel/trampoline.c

@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>

+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);

 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);

+ 4 - 0
arch/x86/kernel/trampoline_64.S

@@ -32,8 +32,12 @@
 #include <asm/segment.h>
 #include <asm/processor-flags.h>

+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 .code16

 ENTRY(trampoline_data)

+ 1 - 1
arch/x86/kernel/vmi_32.c

@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)

 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-	pv_info.name = "vmi";
+	pv_info.name = "vmi [deprecated]";

 	pv_init_ops.patch = vmi_patch;


+ 3 - 0
arch/x86/kernel/vmlinux.lds.S

@@ -305,6 +305,9 @@ SECTIONS


 #ifdef CONFIG_X86_32
+/*
+ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+ */
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 #else

+ 4 - 12
block/blk-core.c

@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part);
+		part_inc_in_flight(part, rw);
 	}

 	part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 	if (now == part->stamp)
 		return;

-	if (part->in_flight) {
+	if (part_in_flight(part)) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part->in_flight * (now - part->stamp));
+				part_in_flight(part) * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rw);

 		part_stat_unlock();
 	}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);

-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *work,
-				  unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
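From here on the series threads the request direction through the partition in-flight accounting: part_inc_in_flight() and part_dec_in_flight() gain an rw argument, and readers (diskstats, the round-stats code above) switch to a part_in_flight() helper. The helpers themselves live in include/linux/genhd.h and are not shown in this diff; a rough sketch of what they are assumed to look like (hd_struct.in_flight becoming a two-element per-direction array is an assumption, not something visible in this patch):

/* Sketch only - per-direction in-flight counters on struct hd_struct. */
static inline void part_inc_in_flight(struct hd_struct *part, int rw)
{
	part->in_flight[rw]++;
}

static inline void part_dec_in_flight(struct hd_struct *part, int rw)
{
	part->in_flight[rw]--;
}

static inline int part_in_flight(struct hd_struct *part)
{
	/* readers still want the combined count */
	return part->in_flight[0] + part->in_flight[1];
}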

+ 1 - 1
block/blk-merge.c

@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rq_data_dir(req));

 		part_stat_unlock();
 	}

+ 1 - 1
block/blk-settings.c

@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
 **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors)

+ 1 - 1
block/blk-tag.c

@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		max_depth -= 2;
 		if (!max_depth)
 			max_depth = 1;
-		if (q->in_flight[0] > max_depth)
+		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
 			return 1;
 	}


+ 142 - 117
block/cfq-iosched.c

@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 * idle window management
 	 */
 	 */
 	struct timer_list idle_slice_timer;
 	struct timer_list idle_slice_timer;
-	struct delayed_work unplug_work;
+	struct work_struct unplug_work;
 
 
 	struct cfq_queue *active_queue;
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 	struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
 				       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 						struct io_context *);
 						struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 }
 
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 }
 
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 }
 
 
 /*
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  * set (in which case it could also be direct WRITE).
  */
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
 {
-	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 }
 
 
 /*
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  * driver that will restart queueing
  */
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-					 unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 {
 	if (cfqd->busy_queues) {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-						delay);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 	}
 }
 }
 
 
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  * io only, should not get full sync slice length.
  */
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 				 unsigned short prio)
 {
 {
 	const int base_slice = cfqd->cfq_slice[sync];
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  * and the slice time set.
  */
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 {
 	if (cfq_cfqq_slice_new(cfqq))
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
 		return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  * we will service the queues.
  */
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-				 int add_front)
+				 bool add_front)
 {
 {
 	struct rb_node **p, *parent;
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
 	struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		} else
 		} else
 			rb_key += jiffies;
 			rb_key += jiffies;
 	} else if (!add_front) {
 	} else if (!add_front) {
+		/*
+		 * Get our rb key offset. Subtract any residual slice
+		 * value carried from last service. A negative resid
+		 * count indicates slice overrun, and this should position
+		 * the next service time further away in the tree.
+		 */
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-		rb_key += cfqq->slice_resid;
+		rb_key -= cfqq->slice_resid;
 		cfqq->slice_resid = 0;
 		cfqq->slice_resid = 0;
-	} else
-		rb_key = 0;
+	} else {
+		rb_key = -HZ;
+		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+	}
 
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
 		/*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			n = &(*p)->rb_left;
 			n = &(*p)->rb_left;
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 			n = &(*p)->rb_right;
 			n = &(*p)->rb_right;
-		else if (rb_key < __cfqq->rb_key)
+		else if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
 			n = &(*p)->rb_left;
 		else
 		else
 			n = &(*p)->rb_right;
 			n = &(*p)->rb_right;
@@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	 * reposition in fifo if next is older than rq
 	 * reposition in fifo if next is older than rq
 	 */
 	 */
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-	    time_before(next->start_time, rq->start_time))
+	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
 		list_move(&rq->queuelist, &next->queuelist);
 		list_move(&rq->queuelist, &next->queuelist);
+		rq_set_fifo_time(rq, rq_fifo_time(next));
+	}
 
 
 	cfq_remove_request(next);
 	cfq_remove_request(next);
 }
 }
@@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 * Disallow merge of a sync bio into an async request.
 	 * Disallow merge of a sync bio into an async request.
 	 */
 	 */
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-		return 0;
+		return false;
 
 
 	/*
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
 	 * Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 */
 	 */
 	cic = cfq_cic_lookup(cfqd, current->io_context);
 	cic = cfq_cic_lookup(cfqd, current->io_context);
 	if (!cic)
 	if (!cic)
-		return 0;
+		return false;
 
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq == RQ_CFQQ(rq))
-		return 1;
-
-	return 0;
+	return cfqq == RQ_CFQQ(rq);
 }
 }
 
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
  */
 static void
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int timed_out)
+		    bool timed_out)
 {
 {
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
 
@@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 	}
 }
 }
 
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
 
@@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 					      struct cfq_queue *cur_cfqq,
 					      struct cfq_queue *cur_cfqq,
-					      int probe)
+					      bool probe)
 {
 {
 	struct cfq_queue *cfqq;
 	struct cfq_queue *cfqq;
 
 
@@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
 		return;
 
 
+	/*
+	 * If our average think time is larger than the remaining time
+	 * slice, then don't idle. This avoids overrunning the allotted
+	 * time slice.
+	 */
+	if (sample_valid(cic->ttime_samples) &&
+	    (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return;
+
 	cfq_mark_cfqq_wait_request(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
 
 	/*
 	/*
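The new think-time check above is easiest to see with numbers (illustrative only): if the queue has 4 ms left in its slice (cfqq->slice_end - jiffies) but the task's mean think time (cic->ttime_mean) is 8 ms, arming the idle timer would almost certainly overrun the slice before the next request arrives, so the queue is simply not idled.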
@@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-	struct request *rq;
-	int fifo;
+	struct request *rq = NULL;
 
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
 		return NULL;
@@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (list_empty(&cfqq->fifo))
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 		return NULL;
 
 
-	fifo = cfq_cfqq_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
 	rq = rq_entry_fifo(cfqq->fifo.next);
-
-	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+	if (time_before(jiffies, rq_fifo_time(rq)))
 		rq = NULL;
 		rq = NULL;
 
 
-	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
 	return rq;
 	return rq;
 }
 }
 
 
@@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 	return dispatched;
 }
 }
 
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 {
-	struct request *rq;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	/*
-	 * follow expired path, else get first next available
-	 */
-	rq = cfq_check_fifo(cfqq);
-	if (!rq)
-		rq = cfqq->next_rq;
-
-	/*
-	 * insert request into driver dispatch list
-	 */
-	cfq_dispatch_insert(cfqd->queue, rq);
-
-	if (!cfqd->active_cic) {
-		struct cfq_io_context *cic = RQ_CIC(rq);
-
-		atomic_long_inc(&cic->ioc->refcount);
-		cfqd->active_cic = cic;
-	}
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
 	unsigned int max_dispatch;
 	unsigned int max_dispatch;
 
 
-	if (!cfqd->busy_queues)
-		return 0;
-
-	if (unlikely(force))
-		return cfq_forced_dispatch(cfqd);
-
-	cfqq = cfq_select_queue(cfqd);
-	if (!cfqq)
-		return 0;
-
 	/*
 	/*
 	 * Drain async requests before we start sync IO
 	 * Drain async requests before we start sync IO
 	 */
 	 */
 	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
 	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 
 	/*
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
 	 */
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-		return 0;
+		return false;
 
 
 	max_dispatch = cfqd->cfq_quantum;
 	max_dispatch = cfqd->cfq_quantum;
 	if (cfq_class_idle(cfqq))
 	if (cfq_class_idle(cfqq))
@@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		 * idle queue must always only have a single IO in flight
 		 * idle queue must always only have a single IO in flight
 		 */
 		 */
 		if (cfq_class_idle(cfqq))
 		if (cfq_class_idle(cfqq))
-			return 0;
+			return false;
 
 
 		/*
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 * We have other queues, don't allow more IO from this one
 		 */
 		 */
 		if (cfqd->busy_queues > 1)
 		if (cfqd->busy_queues > 1)
-			return 0;
+			return false;
 
 
 		/*
 		/*
 		 * Sole queue user, allow bigger slice
 		 * Sole queue user, allow bigger slice
@@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 			max_dispatch = depth;
 			max_dispatch = depth;
 	}
 	}
 
 
-	if (cfqq->dispatched >= max_dispatch)
+	/*
+	 * If we're below the current max, allow a dispatch
+	 */
+	return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	if (!cfq_may_dispatch(cfqd, cfqq))
+		return false;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_long_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+
+	return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+
+	if (!cfqd->busy_queues)
+		return 0;
+
+	if (unlikely(force))
+		return cfq_forced_dispatch(cfqd);
+
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
 		return 0;
 		return 0;
 
 
 	/*
 	/*
-	 * Dispatch a request from this cfqq
+	 * Dispatch a request from this cfqq, if it is allowed
 	 */
 	 */
-	cfq_dispatch_request(cfqd, cfqq);
+	if (!cfq_dispatch_request(cfqd, cfqq))
+		return 0;
+
 	cfqq->slice_dispatch++;
 	cfqq->slice_dispatch++;
 	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_must_dispatch(cfqq);
 
 
@@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 	}
 
 
 	kmem_cache_free(cfq_pool, cfqq);
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 	}
 	}
 
 
 	cfq_put_queue(cfqq);
 	cfq_put_queue(cfqq);
@@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 }
 
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			  pid_t pid, int is_sync)
+			  pid_t pid, bool is_sync)
 {
 {
 	RB_CLEAR_NODE(&cfqq->rb_node);
 	RB_CLEAR_NODE(&cfqq->rb_node);
 	RB_CLEAR_NODE(&cfqq->p_node);
 	RB_CLEAR_NODE(&cfqq->p_node);
@@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 }
 
 
 static struct cfq_queue *
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 }
 
 
 static struct cfq_queue *
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	      gfp_t gfp_mask)
 	      gfp_t gfp_mask)
 {
 {
 	const int ioprio = task_ioprio(ioc);
 	const int ioprio = task_ioprio(ioc);
@@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 	else if (sample_valid(cic->ttime_samples)) {
-		if (cic->ttime_mean > cfqd->cfq_slice_idle)
+		unsigned int slice_idle = cfqd->cfq_slice_idle;
+		if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+			slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+		if (cic->ttime_mean > slice_idle)
 			enable_idle = 0;
 			enable_idle = 0;
 		else
 		else
 			enable_idle = 1;
 			enable_idle = 1;
@@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  * no or if we aren't sure, a 1 will cause a preempt.
  */
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 		   struct request *rq)
 {
 {
@@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 
 	cfqq = cfqd->active_queue;
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
 	if (!cfqq)
-		return 0;
+		return false;
 
 
 	if (cfq_slice_used(cfqq))
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 
 	if (cfq_class_idle(new_cfqq))
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 
 	if (cfq_class_idle(cfqq))
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 
 	/*
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 * not, let the sync request have priority.
 	 */
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 
 	/*
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return false;
 
 
 	/*
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 */
 	 */
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-		return 1;
+		return true;
 
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 
 	/*
 	/*
 	 * if this request is as-good as one we would expect from the
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 * current cfqq, let it preempt
 	 */
 	 */
 	if (cfq_rq_close(cfqd, rq))
 	if (cfq_rq_close(cfqd, rq))
-		return 1;
+		return true;
 
 
-	return 0;
+	return false;
 }
 }
 
 
 /*
 /*
@@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
 
 	cfq_add_rq_rb(rq);
 	cfq_add_rq_rb(rq);
 
 
+	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 	}
 
 
 	if (!rq_in_driver(cfqd))
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd, 0);
+		cfq_schedule_dispatch(cfqd);
 }
 }
 
 
 /*
 /*
@@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_io_context *cic;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
 	const int rw = rq_data_dir(rq);
-	const int is_sync = rq_is_sync(rq);
+	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 	unsigned long flags;
 
 
@@ -2341,7 +2366,7 @@ queue_fail:
 	if (cic)
 	if (cic)
 		put_io_context(cic->ioc);
 		put_io_context(cic->ioc);
 
 
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	cfq_log(cfqd, "set_request fail");
 	return 1;
 	return 1;
@@ -2350,7 +2375,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 static void cfq_kick_queue(struct work_struct *work)
 {
 {
 	struct cfq_data *cfqd =
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work.work);
+		container_of(work, struct cfq_data, unplug_work);
 	struct request_queue *q = cfqd->queue;
 	struct request_queue *q = cfqd->queue;
 
 
 	spin_lock_irq(q->queue_lock);
 	spin_lock_irq(q->queue_lock);
@@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
 out_kick:
-	cfq_schedule_dispatch(cfqd, 0);
+	cfq_schedule_dispatch(cfqd);
 out_cont:
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 }
@@ -2412,7 +2437,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_delayed_work_sync(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 }
 
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
 
-	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];

+ 1 - 3
block/elevator.c

@@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 		return count;
 		return count;
 
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
 	strlcpy(elevator_name, name, sizeof(elevator_name));
-	strstrip(elevator_name);
-
-	e = elevator_get(elevator_name);
+	e = elevator_get(strstrip(elevator_name));
 	if (!e) {
 	if (!e) {
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		return -EINVAL;
 		return -EINVAL;

+ 3 - 1
block/genhd.c

@@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
 	&dev_attr_stat.attr,
+	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 	&dev_attr_fail.attr,
 #endif
 #endif
@@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 			   part_stat_read(hd, merges[1]),
 			   part_stat_read(hd, merges[1]),
 			   (unsigned long long)part_stat_read(hd, sectors[1]),
 			   (unsigned long long)part_stat_read(hd, sectors[1]),
 			   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
 			   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-			   hd->in_flight,
+			   part_in_flight(hd),
 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 			);
 			);

+ 2 - 2
drivers/acpi/Kconfig

@@ -218,10 +218,10 @@ config ACPI_PROCESSOR_AGGREGATOR
 	depends on X86
 	depends on X86
 	help
 	help
 	  ACPI 4.0 defines processor Aggregator, which enables OS to perform
 	  ACPI 4.0 defines processor Aggregator, which enables OS to perform
-	  specfic processor configuration and control that applies to all
+	  specific processor configuration and control that applies to all
 	  processors in the platform. Currently only logical processor idling
 	  processors in the platform. Currently only logical processor idling
 	  is defined, which is to reduce power consumption. This driver
 	  is defined, which is to reduce power consumption. This driver
-	  support the new device.
+	  supports the new device.
 
 
 config ACPI_THERMAL
 config ACPI_THERMAL
 	tristate "Thermal Zone"
 	tristate "Thermal Zone"

+ 1 - 0
drivers/acpi/ac.c

@@ -245,6 +245,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
 		acpi_bus_generate_netlink_event(device->pnp.device_class,
 		acpi_bus_generate_netlink_event(device->pnp.device_class,
 						  dev_name(&device->dev), event,
 						  dev_name(&device->dev), event,
 						  (u32) ac->state);
 						  (u32) ac->state);
+		acpi_notifier_call_chain(device, event, (u32) ac->state);
 #ifdef CONFIG_ACPI_SYSFS_POWER
 #ifdef CONFIG_ACPI_SYSFS_POWER
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 #endif
 #endif

+ 3 - 0
drivers/acpi/button.c

@@ -251,6 +251,9 @@ int acpi_lid_open(void)
 	acpi_status status;
 	acpi_status status;
 	unsigned long long state;
 	unsigned long long state;
 
 
+	if (!lid_device)
+		return -ENODEV;
+
 	status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
 	status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
 				       &state);
 				       &state);
 	if (ACPI_FAILURE(status))
 	if (ACPI_FAILURE(status))

+ 11 - 0
drivers/acpi/pci_root.c

@@ -389,6 +389,17 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
 
 
 		pbus = pdev->subordinate;
 		pbus = pdev->subordinate;
 		pci_dev_put(pdev);
 		pci_dev_put(pdev);
+
+		/*
+		 * This function may be called for a non-PCI device that has a
+		 * PCI parent (eg. a disk under a PCI SATA controller).  In that
+		 * case pdev->subordinate will be NULL for the parent.
+		 */
+		if (!pbus) {
+			dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
+			pdev = NULL;
+			break;
+		}
 	}
 	}
 out:
 out:
 	list_for_each_entry_safe(node, tmp, &device_list, node)
 	list_for_each_entry_safe(node, tmp, &device_list, node)

+ 6 - 1
drivers/acpi/video.c

@@ -1109,7 +1109,12 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
 	 */
 	 */
 
 
 	/* Does this device support video switching? */
 	/* Does this device support video switching? */
-	if (video->cap._DOS) {
+	if (video->cap._DOS || video->cap._DOD) {
+		if (!video->cap._DOS) {
+			printk(KERN_WARNING FW_BUG
+				"ACPI(%s) defines _DOD but not _DOS\n",
+				acpi_device_bid(video->device));
+		}
 		video->flags.multihead = 1;
 		video->flags.multihead = 1;
 		status = 0;
 		status = 0;
 	}
 	}

+ 1 - 1
drivers/acpi/video_detect.c

@@ -84,7 +84,7 @@ long acpi_is_video_device(struct acpi_device *device)
 		return 0;
 		return 0;
 
 
 	/* Does this device able to support video switching ? */
 	/* Does this device able to support video switching ? */
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
+	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
 	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
 	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
 		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
 		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
 
 

+ 37 - 42
drivers/block/cciss.c

@@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
 MODULE_VERSION("3.6.20");
 MODULE_VERSION("3.6.20");
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");
 
 
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+	"Prevent cciss driver from accessing hardware known to be "
+	" supported by the hpsa driver");
+
 #include "cciss_cmd.h"
 #include "cciss_cmd.h"
 #include "cciss.h"
 #include "cciss.h"
 #include <linux/cciss_ioctl.h>
 #include <linux/cciss_ioctl.h>
@@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
-	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 	{0,}
 };
 };
 
 
@@ -123,8 +127,6 @@ static struct board_type products[] = {
 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 	{0x40910E11, "Smart Array 6i", &SA5_access},
 	{0x40910E11, "Smart Array 6i", &SA5_access},
 	{0x3225103C, "Smart Array P600", &SA5_access},
 	{0x3225103C, "Smart Array P600", &SA5_access},
-	{0x3223103C, "Smart Array P800", &SA5_access},
-	{0x3234103C, "Smart Array P400", &SA5_access},
 	{0x3235103C, "Smart Array P400i", &SA5_access},
 	{0x3235103C, "Smart Array P400i", &SA5_access},
 	{0x3211103C, "Smart Array E200i", &SA5_access},
 	{0x3211103C, "Smart Array E200i", &SA5_access},
 	{0x3212103C, "Smart Array E200", &SA5_access},
 	{0x3212103C, "Smart Array E200", &SA5_access},
@@ -132,6 +134,10 @@ static struct board_type products[] = {
 	{0x3214103C, "Smart Array E200i", &SA5_access},
 	{0x3214103C, "Smart Array E200i", &SA5_access},
 	{0x3215103C, "Smart Array E200i", &SA5_access},
 	{0x3215103C, "Smart Array E200i", &SA5_access},
 	{0x3237103C, "Smart Array E500", &SA5_access},
 	{0x3237103C, "Smart Array E500", &SA5_access},
+/* controllers below this line are also supported by the hpsa driver. */
+#define HPSA_BOUNDARY 0x3223103C
+	{0x3223103C, "Smart Array P800", &SA5_access},
+	{0x3234103C, "Smart Array P400", &SA5_access},
 	{0x323D103C, "Smart Array P700m", &SA5_access},
 	{0x323D103C, "Smart Array P700m", &SA5_access},
 	{0x3241103C, "Smart Array P212", &SA5_access},
 	{0x3241103C, "Smart Array P212", &SA5_access},
 	{0x3243103C, "Smart Array P410", &SA5_access},
 	{0x3243103C, "Smart Array P410", &SA5_access},
@@ -140,7 +146,6 @@ static struct board_type products[] = {
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x324A103C, "Smart Array P712m", &SA5_access},
 	{0x324A103C, "Smart Array P712m", &SA5_access},
 	{0x324B103C, "Smart Array P711m", &SA5_access},
 	{0x324B103C, "Smart Array P711m", &SA5_access},
-	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 };
 
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	__u64 cfg_offset;
 	__u64 cfg_offset;
 	__u32 cfg_base_addr;
 	__u32 cfg_base_addr;
 	__u64 cfg_base_addr_index;
 	__u64 cfg_base_addr_index;
-	int i, err;
+	int i, prod_index, err;
+
+	subsystem_vendor_id = pdev->subsystem_vendor;
+	subsystem_device_id = pdev->subsystem_device;
+	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+		    subsystem_vendor_id);
+
+	for (i = 0; i < ARRAY_SIZE(products); i++) {
+		/* Stand aside for hpsa driver on request */
+		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
+			return -ENODEV;
+		if (board_id == products[i].board_id)
+			break;
+	}
+	prod_index = i;
+	if (prod_index == ARRAY_SIZE(products)) {
+		dev_warn(&pdev->dev,
+			"unrecognized board ID: 0x%08lx, ignoring.\n",
+			(unsigned long) board_id);
+		return -ENODEV;
+	}
 
 
 	/* check to see if controller has been disabled */
 	/* check to see if controller has been disabled */
 	/* BEFORE trying to enable it */
 	/* BEFORE trying to enable it */
@@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 		return err;
 		return err;
 	}
 	}
 
 
-	subsystem_vendor_id = pdev->subsystem_vendor;
-	subsystem_device_id = pdev->subsystem_device;
-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
-		    subsystem_vendor_id);
-
 #ifdef CCISS_DEBUG
 #ifdef CCISS_DEBUG
 	printk("command = %x\n", command);
 	printk("command = %x\n", command);
 	printk("irq = %x\n", pdev->irq);
 	printk("irq = %x\n", pdev->irq);
@@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	 * leave a little room for ioctl calls.
 	 * leave a little room for ioctl calls.
 	 */
 	 */
 	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
 	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-	for (i = 0; i < ARRAY_SIZE(products); i++) {
-		if (board_id == products[i].board_id) {
-			c->product_name = products[i].product_name;
-			c->access = *(products[i].access);
-			c->nr_cmds = c->max_commands - 4;
-			break;
-		}
-	}
+	c->product_name = products[prod_index].product_name;
+	c->access = *(products[prod_index].access);
+	c->nr_cmds = c->max_commands - 4;
 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
 	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
 	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 		err = -ENODEV;
 		err = -ENODEV;
 		goto err_out_free_res;
 		goto err_out_free_res;
 	}
 	}
-	/* We didn't find the controller in our list. We know the
-	 * signature is valid. If it's an HP device let's try to
-	 * bind to the device and fire it up. Otherwise we bail.
-	 */
-	if (i == ARRAY_SIZE(products)) {
-		if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
-			c->product_name = products[i-1].product_name;
-			c->access = *(products[i-1].access);
-			c->nr_cmds = c->max_commands - 4;
-			printk(KERN_WARNING "cciss: This is an unknown "
-				"Smart Array controller.\n"
-				"cciss: Please update to the latest driver "
-				"available from www.hp.com.\n");
-		} else {
-			printk(KERN_WARNING "cciss: Sorry, I don't know how"
-				" to access the Smart Array controller %08lx\n"
-					, (unsigned long)board_id);
-			err = -ENODEV;
-			goto err_out_free_res;
-		}
-	}
 #ifdef CONFIG_X86
 #ifdef CONFIG_X86
 	{
 	{
 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	mutex_init(&hba[i]->busy_shutting_down);
 	mutex_init(&hba[i]->busy_shutting_down);
 
 
 	if (cciss_pci_init(hba[i], pdev) != 0)
 	if (cciss_pci_init(hba[i], pdev) != 0)
-		goto clean0;
+		goto clean_no_release_regions;
 
 
 	sprintf(hba[i]->devname, "cciss%d", i);
 	sprintf(hba[i]->devname, "cciss%d", i);
 	hba[i]->ctlr = i;
 	hba[i]->ctlr = i;
@@ -4391,13 +4385,14 @@ clean2:
 clean1:
 clean1:
 	cciss_destroy_hba_sysfs_entry(hba[i]);
 	cciss_destroy_hba_sysfs_entry(hba[i]);
 clean0:
 clean0:
+	pci_release_regions(pdev);
+clean_no_release_regions:
 	hba[i]->busy_initializing = 0;
 	hba[i]->busy_initializing = 0;
 
 
 	/*
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
 	 * Smart Array controllers that pci_enable_device does not undo
 	 */
 	 */
-	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pci_set_drvdata(pdev, NULL);
 	free_hba(i);
 	free_hba(i);
 	return -1;
 	return -1;

+ 1 - 0
drivers/char/genrtc.c

@@ -43,6 +43,7 @@
 #define RTC_VERSION	"1.07"
 #define RTC_VERSION	"1.07"
 
 
 #include <linux/module.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/errno.h>
 #include <linux/miscdevice.h>
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
 #include <linux/fcntl.h>

+ 1 - 0
drivers/char/rtc.c

@@ -74,6 +74,7 @@
 #include <linux/proc_fs.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <linux/sysctl.h>
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 #include <linux/wait.h>
 #include <linux/bcd.h>
 #include <linux/bcd.h>

+ 1 - 0
drivers/char/sonypi.c

@@ -36,6 +36,7 @@
  */
  */
 
 
 #include <linux/module.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/input.h>
 #include <linux/input.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/init.h>

+ 14 - 17
drivers/char/tty_buffer.c

@@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
 		container_of(work, struct tty_struct, buf.work.work);
 		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long 	flags;
 	unsigned long 	flags;
 	struct tty_ldisc *disc;
 	struct tty_ldisc *disc;
-	struct tty_buffer *tbuf, *head;
-	char *char_buf;
-	unsigned char *flag_buf;
 
 
 	disc = tty_ldisc_ref(tty);
 	disc = tty_ldisc_ref(tty);
 	if (disc == NULL)	/*  !TTY_LDISC */
 	if (disc == NULL)	/*  !TTY_LDISC */
 		return;
 		return;
 
 
 	spin_lock_irqsave(&tty->buf.lock, flags);
 	spin_lock_irqsave(&tty->buf.lock, flags);
-	/* So we know a flush is running */
-	set_bit(TTY_FLUSHING, &tty->flags);
-	head = tty->buf.head;
-	if (head != NULL) {
-		tty->buf.head = NULL;
-		for (;;) {
-			int count = head->commit - head->read;
+
+	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
+		struct tty_buffer *head;
+		while ((head = tty->buf.head) != NULL) {
+			int count;
+			char *char_buf;
+			unsigned char *flag_buf;
+
+			count = head->commit - head->read;
 			if (!count) {
 			if (!count) {
 				if (head->next == NULL)
 				if (head->next == NULL)
 					break;
 					break;
-				tbuf = head;
-				head = head->next;
-				tty_buffer_free(tty, tbuf);
+				tty->buf.head = head->next;
+				tty_buffer_free(tty, head);
 				continue;
 				continue;
 			}
 			}
 			/* Ldisc or user is trying to flush the buffers
 			/* Ldisc or user is trying to flush the buffers
@@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
 							flag_buf, count);
 							flag_buf, count);
 			spin_lock_irqsave(&tty->buf.lock, flags);
 			spin_lock_irqsave(&tty->buf.lock, flags);
 		}
 		}
-		/* Restore the queue head */
-		tty->buf.head = head;
+		clear_bit(TTY_FLUSHING, &tty->flags);
 	}
 	}
+
 	/* We may have a deferred request to flush the input buffer,
 	/* We may have a deferred request to flush the input buffer,
 	   if so pull the chain under the lock and empty the queue */
 	   if so pull the chain under the lock and empty the queue */
 	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
 	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
 		clear_bit(TTY_FLUSHPENDING, &tty->flags);
 		clear_bit(TTY_FLUSHPENDING, &tty->flags);
 		wake_up(&tty->read_wait);
 		wake_up(&tty->read_wait);
 	}
 	}
-	clear_bit(TTY_FLUSHING, &tty->flags);
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 
 	tty_ldisc_deref(disc);
 	tty_ldisc_deref(disc);
@@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
  */
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
 {
-	flush_to_ldisc(&tty->buf.work.work);
+	flush_delayed_work(&tty->buf.work);
 }
 }
 
 
 /**
 /**

+ 1 - 1
drivers/char/vt_ioctl.c

@@ -1532,7 +1532,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
 
 
 	case PIO_UNIMAP:
 	case PIO_UNIMAP:
 	case GIO_UNIMAP:
 	case GIO_UNIMAP:
-		ret = do_unimap_ioctl(cmd, up, perm, vc);
+		ret = compat_unimap_ioctl(cmd, up, perm, vc);
 		break;
 		break;
 
 
 	/*
 	/*

+ 12 - 9
drivers/edac/edac_mce_amd.c

@@ -3,7 +3,6 @@
 
 static bool report_gart_errors;
 static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
-static void (*orig_mce_callback)(struct mce *m);
 
 void amd_report_gart_errors(bool v)
 {
@@ -363,8 +362,10 @@ static inline void amd_decode_err_code(unsigned int ec)
 		pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
 }
 
-static void amd_decode_mce(struct mce *m)
+static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
+			   void *data)
 {
+	struct mce *m = (struct mce *)data;
 	struct err_regs regs;
 	int node, ecc;
 
@@ -420,20 +421,22 @@ static void amd_decode_mce(struct mce *m)
 	}
 
 	amd_decode_err_code(m->status & 0xffff);
+
+	return NOTIFY_STOP;
 }
 
+static struct notifier_block amd_mce_dec_nb = {
+	.notifier_call	= amd_decode_mce,
+};
+
 static int __init mce_amd_init(void)
 {
 	/*
 	 * We can decode MCEs for Opteron and later CPUs:
 	 */
 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
-	    (boot_cpu_data.x86 >= 0xf)) {
-		/* safe the default decode mce callback */
-		orig_mce_callback = x86_mce_decode_callback;
-
-		x86_mce_decode_callback = amd_decode_mce;
-	}
+	    (boot_cpu_data.x86 >= 0xf))
+		atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 
 	return 0;
 }
@@ -442,7 +445,7 @@ early_initcall(mce_amd_init);
 #ifdef MODULE
 static void __exit mce_amd_exit(void)
 {
-	x86_mce_decode_callback = orig_mce_callback;
+	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 }
 
 MODULE_DESCRIPTION("AMD MCE decoder");
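The edac_mce_amd.c change above drops the single x86_mce_decode_callback function pointer and registers on the x86_mce_decoder_chain atomic notifier instead, so more than one decoder can subscribe and no save/restore of the previous callback is needed. A minimal sketch of the atomic-notifier pattern, with hypothetical names (demo_chain, demo_decode, demo_nb) rather than the real MCE chain:

#include <linux/notifier.h>

/* Hypothetical chain; the patch itself registers on x86_mce_decoder_chain. */
static ATOMIC_NOTIFIER_HEAD(demo_chain);

static int demo_decode(struct notifier_block *nb, unsigned long val, void *data)
{
	/* decode the event described by 'data' */
	return NOTIFY_STOP;	/* event handled, do not call further notifiers */
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_decode,
};

static int __init demo_register(void)
{
	atomic_notifier_chain_register(&demo_chain, &demo_nb);
	return 0;
}

static void __exit demo_unregister(void)
{
	atomic_notifier_chain_unregister(&demo_chain, &demo_nb);
}

/*
 * A producer would fire the chain with:
 *	atomic_notifier_call_chain(&demo_chain, 0, event_data);
 */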

+ 19 - 20
drivers/firewire/sbp2.c

@@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
 /* Impossible login_id, to detect logout attempt before successful login */
 #define INVALID_LOGIN_ID 0x10000
 
-/*
- * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
- * provided in the config rom. Most devices do provide a value, which
- * we'll use for login management orbs, but with some sane limits.
- */
-#define SBP2_MIN_LOGIN_ORB_TIMEOUT	5000U	/* Timeout in ms */
-#define SBP2_MAX_LOGIN_ORB_TIMEOUT	40000U	/* Timeout in ms */
-#define SBP2_ORB_TIMEOUT		2000U	/* Timeout in ms */
+#define SBP2_ORB_TIMEOUT		2000U		/* Timeout in ms */
 #define SBP2_ORB_NULL			0x80000000
 #define SBP2_RETRY_LIMIT		0xf		/* 15 retries */
 #define SBP2_CYCLE_LIMIT		(0xc8 << 12)	/* 200 125us cycles */
@@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
 {
 	struct fw_csr_iterator ci;
 	int key, value;
-	unsigned int timeout;
 
 	fw_csr_iterator_init(&ci, directory);
 	while (fw_csr_iterator_next(&ci, &key, &value)) {
@@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
 
 		case SBP2_CSR_UNIT_CHARACTERISTICS:
 			/* the timeout value is stored in 500ms units */
-			timeout = ((unsigned int) value >> 8 & 0xff) * 500;
-			timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
-			tgt->mgt_orb_timeout =
-				  min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
-
-			if (timeout > tgt->mgt_orb_timeout)
-				fw_notify("%s: config rom contains %ds "
-					  "management ORB timeout, limiting "
-					  "to %ds\n", tgt->bus_id,
-					  timeout / 1000,
-					  tgt->mgt_orb_timeout / 1000);
+			tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
 			break;
 
 		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
@@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
 	return 0;
 }
 
+/*
+ * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
+ * provided in the config rom. Most devices do provide a value, which
+ * we'll use for login management orbs, but with some sane limits.
+ */
+static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
+{
+	unsigned int timeout = tgt->mgt_orb_timeout;
+
+	if (timeout > 40000)
+		fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
+			  tgt->bus_id, timeout / 1000);
+
+	tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
+}
+
 static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
 				  u32 firmware_revision)
 {
@@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev)
 			       &firmware_revision) < 0)
 		goto fail_tgt_put;
 
+	sbp2_clamp_management_orb_timeout(tgt);
 	sbp2_init_workarounds(tgt, model, firmware_revision);
 
 	/*
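The sbp2.c change above moves the min()/max() clamping of the management ORB timeout out of sbp2_scan_unit_dir() and into a helper that runs once per target in sbp2_probe(), using clamp_val() to keep the value between 5 and 40 seconds. A minimal sketch of the clamp_val() idiom, with a hypothetical helper name (demo_clamp_timeout) rather than the sbp2 function:

#include <linux/kernel.h>

static unsigned int demo_clamp_timeout(unsigned int timeout_ms)
{
	if (timeout_ms > 40000)
		pr_info("timeout of %u ms limited to 40000 ms\n", timeout_ms);

	/* clamp_val() bounds the value using the type of timeout_ms */
	return clamp_val(timeout_ms, 5000, 40000);
}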

+ 1 - 1
drivers/hid/hid-core.c

@@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
  * @type: HID report type (HID_*_REPORT)
  * @data: report contents
  * @size: size of data parameter
- * @interrupt: called from atomic?
+ * @interrupt: distinguish between interrupt and control transfers
  *
  * This is data entry for lower layers.
  */

Too many files were changed in this diff, so some files are not shown.