
Merge commit 'v2.6.36-rc5' into perf/core

Merge reason: Pick up the latest fixes in -rc5.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar, 14 years ago
parent
commit
7ed569206e
100 changed files with 1,122 additions and 499 deletions
  1. + 3 - 4	Documentation/hwmon/sysfs-interface
  2. + 1 - 1	Documentation/power/regulator/overview.txt
  3. + 380 - 0	Documentation/workqueue.txt
  4. + 7 - 4	MAINTAINERS
  5. + 1 - 1	Makefile
  6. + 2 - 0	arch/alpha/include/asm/cacheflush.h
  7. + 5 - 1	arch/alpha/include/asm/unistd.h
  8. + 22 - 39	arch/alpha/kernel/entry.S
  9. + 7 - 5	arch/alpha/kernel/err_ev6.c
  10. + 17 - 16	arch/alpha/kernel/err_marvel.c
  11. + 20 - 15	arch/alpha/kernel/err_titan.c
  12. + 2 - 7	arch/alpha/kernel/osf_sys.c
  13. + 1 - 1	arch/alpha/kernel/pci-sysfs.c
  14. + 3 - 38	arch/alpha/kernel/signal.c
  15. + 1 - 1	arch/alpha/kernel/srm_env.c
  16. + 3 - 0	arch/alpha/kernel/systbls.S
  17. + 5 - 5	arch/alpha/kernel/time.c
  18. + 0 - 3	arch/alpha/kernel/traps.c
  19. + 2 - 0	arch/arm/kernel/entry-common.S
  20. + 2 - 1	arch/arm/mach-s3c64xx/dev-spi.c
  21. + 52 - 52	arch/arm/mach-s3c64xx/mach-real6410.c
  22. + 19 - 1	arch/arm/mach-s5pv210/clock.c
  23. + 1 - 1	arch/arm/mach-s5pv210/cpu.c
  24. + 8 - 1	arch/arm/plat-s5p/dev-fimc0.c
  25. + 8 - 1	arch/arm/plat-s5p/dev-fimc1.c
  26. + 8 - 1	arch/arm/plat-s5p/dev-fimc2.c
  27. + 4 - 3	arch/arm/plat-samsung/gpio-config.c
  28. + 5 - 5	arch/arm/plat-samsung/include/plat/gpio-cfg.h
  29. + 31 - 20	arch/frv/kernel/signal.c
  30. + 1 - 1	arch/ia64/include/asm/compat.h
  31. + 11 - 31	arch/ia64/kernel/fsys.S
  32. + 1 - 1	arch/mips/include/asm/compat.h
  33. + 11 - 11	arch/mn10300/kernel/mn10300-serial.c
  34. + 1 - 1	arch/parisc/include/asm/compat.h
  35. + 1 - 1	arch/powerpc/include/asm/compat.h
  36. + 1 - 1	arch/s390/include/asm/compat.h
  37. + 1 - 1	arch/sparc/include/asm/compat.h
  38. + 3 - 0	arch/tile/include/arch/chip_tile64.h
  39. + 3 - 0	arch/tile/include/arch/chip_tilepro.h
  40. + 4 - 3	arch/tile/include/asm/compat.h
  41. + 4 - 4	arch/tile/include/asm/io.h
  42. + 12 - 0	arch/tile/include/asm/processor.h
  43. + 6 - 9	arch/tile/include/asm/ptrace.h
  44. + 13 - 5	arch/tile/include/asm/sigcontext.h
  45. + 1 - 0	arch/tile/include/asm/signal.h
  46. + 13 - 8	arch/tile/include/asm/syscalls.h
  47. + 20 - 10	arch/tile/kernel/process.c
  48. + 15 - 12	arch/tile/kernel/signal.c
  49. + 1 - 1	arch/tile/kernel/stack.c
  50. + 1 - 1	arch/x86/Makefile
  51. + 14 - 8	arch/x86/ia32/ia32entry.S
  52. + 1 - 1	arch/x86/include/asm/compat.h
  53. + 2 - 2	arch/x86/include/asm/cpufeature.h
  54. + 0 - 1	arch/x86/include/asm/hpet.h
  55. + 3 - 3	arch/x86/kernel/apic/x2apic_uv_x.c
  56. + 0 - 18	arch/x86/kernel/early-quirks.c
  57. + 17 - 14	arch/x86/kernel/hpet.c
  58. + 1 - 1	drivers/Makefile
  59. + 79 - 6	drivers/dca/dca-core.c
  60. + 1 - 0	drivers/firewire/ohci.c
  61. + 5 - 5	drivers/gpu/drm/drm_crtc_helper.c
  62. + 4 - 0	drivers/gpu/drm/drm_pci.c
  63. + 5 - 0	drivers/gpu/drm/drm_platform.c
  64. + 1 - 1	drivers/gpu/drm/drm_sysfs.c
  65. + 5 - 1	drivers/gpu/drm/i915/intel_crt.c
  66. + 1 - 1	drivers/gpu/drm/i915/intel_dp.c
  67. + 2 - 1	drivers/gpu/drm/i915/intel_dvo.c
  68. + 1 - 1	drivers/gpu/drm/i915/intel_hdmi.c
  69. + 5 - 2	drivers/gpu/drm/i915/intel_lvds.c
  70. + 3 - 2	drivers/gpu/drm/i915/intel_sdvo.c
  71. + 5 - 6	drivers/gpu/drm/i915/intel_tv.c
  72. + 3 - 3	drivers/gpu/drm/nouveau/nouveau_connector.c
  73. + 3 - 2	drivers/gpu/drm/radeon/atombios_crtc.c
  74. + 19 - 8	drivers/gpu/drm/radeon/evergreen.c
  75. + 7 - 17	drivers/gpu/drm/radeon/r100.c
  76. + 25 - 0	drivers/gpu/drm/radeon/r600_blit_kms.c
  77. + 24 - 0	drivers/gpu/drm/radeon/r600_blit_shaders.h
  78. + 2 - 3	drivers/gpu/drm/radeon/r600_cs.c
  79. + 47 - 0	drivers/gpu/drm/radeon/radeon_combios.c
  80. + 10 - 5	drivers/gpu/drm/radeon/radeon_connectors.c
  81. + 5 - 4	drivers/gpu/drm/radeon/radeon_display.c
  82. + 2 - 1	drivers/gpu/drm/radeon/radeon_mode.h
  83. + 4 - 3	drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
  84. + 3 - 1	drivers/hid/hid-core.c
  85. + 4 - 0	drivers/hid/hid-ids.h
  86. + 1 - 0	drivers/hid/hid-mosart.c
  87. + 1 - 0	drivers/hid/hid-topseed.c
  88. + 7 - 1	drivers/hid/usbhid/hid-core.c
  89. + 4 - 0	drivers/hid/usbhid/hid-quirks.c
  90. + 1 - 1	drivers/hid/usbhid/hiddev.c
  91. + 1 - 0	drivers/hid/usbhid/usbhid.h
  92. + 24 - 19	drivers/hwmon/adm1031.c
  93. + 0 - 1	drivers/hwmon/emc1403.c
  94. + 3 - 3	drivers/hwmon/f75375s.c
  95. + 2 - 2	drivers/hwmon/lis3lv02d_i2c.c
  96. + 2 - 2	drivers/hwmon/lis3lv02d_spi.c
  97. + 11 - 10	drivers/hwmon/lm95241.c
  98. + 1 - 0	drivers/hwmon/w83627ehf.c
  99. + 3 - 9	drivers/ide/ide-probe.c
  100. + 4 - 2	drivers/md/md.c

+ 3 - 4
Documentation/hwmon/sysfs-interface

@@ -91,12 +91,11 @@ name		The chip name.
 		I2C devices get this attribute created automatically.
 		RO
 
-update_rate	The rate at which the chip will update readings.
+update_interval	The interval at which the chip will update readings.
 		Unit: millisecond
 		RW
-		Some devices have a variable update rate. This attribute
-		can be used to change the update rate to the desired
-		frequency.
+		Some devices have a variable update rate or interval.
+		This attribute can be used to change it to the desired value.
 
 
 ************
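
For orientation only (not part of the patch): a minimal user-space sketch, in C, of how the renamed update_interval attribute could be read and rewritten through sysfs.  The "hwmon0" index is a placeholder; the actual device number depends on the system.

	/* Read, print and rewrite update_interval (milliseconds). */
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *path = "/sys/class/hwmon/hwmon0/update_interval";
		long interval_ms = -1;
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%ld", &interval_ms) != 1)
			interval_ms = -1;
		fclose(f);
		printf("current update interval: %ld ms\n", interval_ms);

		/* The attribute is RW, so a new interval can be written back. */
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "%d\n", 1000);
			fclose(f);
		}
		return EXIT_SUCCESS;
	}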

+ 1 - 1
Documentation/power/regulator/overview.txt

@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
 current limit is controllable).
 
 (C) 2008  Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
 
 
 Nomenclature

+ 380 - 0
Documentation/workqueue.txt

@@ -0,0 +1,380 @@
+
+Concurrency Managed Workqueue (cmwq)
+
+September, 2010		Tejun Heo <tj@kernel.org>
+			Florian Mickler <florian@mickler.org>
+
+CONTENTS
+
+1. Introduction
+2. Why cmwq?
+3. The Design
+4. Application Programming Interface (API)
+5. Example Execution Scenarios
+6. Guidelines
+
+
+1. Introduction
+
+There are many cases where an asynchronous process execution context
+is needed and the workqueue (wq) API is the most commonly used
+mechanism for such cases.
+
+When such an asynchronous execution context is needed, a work item
+describing which function to execute is put on a queue.  An
+independent thread serves as the asynchronous execution context.  The
+queue is called workqueue and the thread is called worker.
+
+While there are work items on the workqueue the worker executes the
+functions associated with the work items one after the other.  When
+there is no work item left on the workqueue the worker becomes idle.
+When a new work item gets queued, the worker begins executing again.
+
+
+2. Why cmwq?
+
+In the original wq implementation, a multi threaded (MT) wq had one
+worker thread per CPU and a single threaded (ST) wq had one worker
+thread system-wide.  A single MT wq needed to keep around the same
+number of workers as the number of CPUs.  The kernel grew a lot of MT
+wq users over the years and with the number of CPU cores continuously
+rising, some systems saturated the default 32k PID space just booting
+up.
+
+Although MT wq wasted a lot of resource, the level of concurrency
+provided was unsatisfactory.  The limitation was common to both ST and
+MT wq albeit less severe on MT.  Each wq maintained its own separate
+worker pool.  A MT wq could provide only one execution context per CPU
+while a ST wq one for the whole system.  Work items had to compete for
+those very limited execution contexts leading to various problems
+including proneness to deadlocks around the single execution context.
+
+The tension between the provided level of concurrency and resource
+usage also forced its users to make unnecessary tradeoffs like libata
+choosing to use ST wq for polling PIOs and accepting an unnecessary
+limitation that no two polling PIOs can progress at the same time.  As
+MT wq don't provide much better concurrency, users which require
+higher level of concurrency, like async or fscache, had to implement
+their own thread pool.
+
+Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
+focus on the following goals.
+
+* Maintain compatibility with the original workqueue API.
+
+* Use per-CPU unified worker pools shared by all wq to provide
+  flexible level of concurrency on demand without wasting a lot of
+  resource.
+
+* Automatically regulate worker pool and level of concurrency so that
+  the API users don't need to worry about such details.
+
+
+3. The Design
+
+In order to ease the asynchronous execution of functions a new
+abstraction, the work item, is introduced.
+
+A work item is a simple struct that holds a pointer to the function
+that is to be executed asynchronously.  Whenever a driver or subsystem
+wants a function to be executed asynchronously it has to set up a work
+item pointing to that function and queue that work item on a
+workqueue.
+
+Special purpose threads, called worker threads, execute the functions
+off of the queue, one after the other.  If no work is queued, the
+worker threads become idle.  These worker threads are managed in so
+called thread-pools.
+
+The cmwq design differentiates between the user-facing workqueues that
+subsystems and drivers queue work items on and the backend mechanism
+which manages thread-pool and processes the queued work items.
+
+The backend is called gcwq.  There is one gcwq for each possible CPU
+and one gcwq to serve work items queued on unbound workqueues.
+
+Subsystems and drivers can create and queue work items through special
+workqueue API functions as they see fit. They can influence some
+aspects of the way the work items are executed by setting flags on the
+workqueue they are putting the work item on. These flags include
+things like CPU locality, reentrancy, concurrency limits and more. To
+get a detailed overview refer to the API description of
+alloc_workqueue() below.
+
+When a work item is queued to a workqueue, the target gcwq is
+determined according to the queue parameters and workqueue attributes
+and appended on the shared worklist of the gcwq.  For example, unless
+specifically overridden, a work item of a bound workqueue will be
+queued on the worklist of exactly that gcwq that is associated to the
+CPU the issuer is running on.
+
+For any worker pool implementation, managing the concurrency level
+(how many execution contexts are active) is an important issue.  cmwq
+tries to keep the concurrency at a minimal but sufficient level.
+Minimal to save resources and sufficient in that the system is used at
+its full capacity.
+
+Each gcwq bound to an actual CPU implements concurrency management by
+hooking into the scheduler.  The gcwq is notified whenever an active
+worker wakes up or sleeps and keeps track of the number of the
+currently runnable workers.  Generally, work items are not expected to
+hog a CPU and consume many cycles.  That means maintaining just enough
+concurrency to prevent work processing from stalling should be
+optimal.  As long as there are one or more runnable workers on the
+CPU, the gcwq doesn't start execution of a new work, but, when the
+last running worker goes to sleep, it immediately schedules a new
+worker so that the CPU doesn't sit idle while there are pending work
+items.  This allows using a minimal number of workers without losing
+execution bandwidth.
+
+Keeping idle workers around doesn't cost other than the memory space
+for kthreads, so cmwq holds onto idle ones for a while before killing
+them.
+
+For an unbound wq, the above concurrency management doesn't apply and
+the gcwq for the pseudo unbound CPU tries to start executing all work
+items as soon as possible.  The responsibility of regulating
+concurrency level is on the users.  There is also a flag to mark a
+bound wq to ignore the concurrency management.  Please refer to the
+API section for details.
+
+Forward progress guarantee relies on that workers can be created when
+more execution contexts are necessary, which in turn is guaranteed
+through the use of rescue workers.  All work items which might be used
+on code paths that handle memory reclaim are required to be queued on
+wq's that have a rescue-worker reserved for execution under memory
+pressure.  Else it is possible that the thread-pool deadlocks waiting
+for execution contexts to free up.
+
+
+4. Application Programming Interface (API)
+
+alloc_workqueue() allocates a wq.  The original create_*workqueue()
+functions are deprecated and scheduled for removal.  alloc_workqueue()
+takes three arguments - @name, @flags and @max_active.  @name is the
+name of the wq and also used as the name of the rescuer thread if
+there is one.
+
+A wq no longer manages execution resources but serves as a domain for
+forward progress guarantee, flush and work item attributes.  @flags
+and @max_active control how work items are assigned execution
+resources, scheduled and executed.
+
+@flags:
+
+  WQ_NON_REENTRANT
+
+	By default, a wq guarantees non-reentrance only on the same
+	CPU.  A work item may not be executed concurrently on the same
+	CPU by multiple workers but is allowed to be executed
+	concurrently on multiple CPUs.  This flag makes sure
+	non-reentrance is enforced across all CPUs.  Work items queued
+	to a non-reentrant wq are guaranteed to be executed by at most
+	one worker system-wide at any given time.
+
+  WQ_UNBOUND
+
+	Work items queued to an unbound wq are served by a special
+	gcwq which hosts workers which are not bound to any specific
+	CPU.  This makes the wq behave as a simple execution context
+	provider without concurrency management.  The unbound gcwq
+	tries to start execution of work items as soon as possible.
+	Unbound wq sacrifices locality but is useful for the following
+	cases.
+
+	* Wide fluctuation in the concurrency level requirement is
+	  expected and using bound wq may end up creating large number
+	  of mostly unused workers across different CPUs as the issuer
+	  hops through different CPUs.
+
+	* Long running CPU intensive workloads which can be better
+	  managed by the system scheduler.
+
+  WQ_FREEZEABLE
+
+	A freezeable wq participates in the freeze phase of the system
+	suspend operations.  Work items on the wq are drained and no
+	new work item starts execution until thawed.
+
+  WQ_RESCUER
+
+	All wq which might be used in the memory reclaim paths _MUST_
+	have this flag set.  This reserves one worker exclusively for
+	the execution of this wq under memory pressure.
+
+  WQ_HIGHPRI
+
+	Work items of a highpri wq are queued at the head of the
+	worklist of the target gcwq and start execution regardless of
+	the current concurrency level.  In other words, highpri work
+	items will always start execution as soon as execution
+	resource is available.
+
+	Ordering among highpri work items is preserved - a highpri
+	work item queued after another highpri work item will start
+	execution after the earlier highpri work item starts.
+
+	Although highpri work items are not held back by other
+	runnable work items, they still contribute to the concurrency
+	level.  Highpri work items in runnable state will prevent
+	non-highpri work items from starting execution.
+
+	This flag is meaningless for unbound wq.
+
+  WQ_CPU_INTENSIVE
+
+	Work items of a CPU intensive wq do not contribute to the
+	concurrency level.  In other words, runnable CPU intensive
+	work items will not prevent other work items from starting
+	execution.  This is useful for bound work items which are
+	expected to hog CPU cycles so that their execution is
+	regulated by the system scheduler.
+
+	Although CPU intensive work items don't contribute to the
+	concurrency level, start of their executions is still
+	regulated by the concurrency management and runnable
+	non-CPU-intensive work items can delay execution of CPU
+	intensive work items.
+
+	This flag is meaningless for unbound wq.
+
+  WQ_HIGHPRI | WQ_CPU_INTENSIVE
+
+	This combination makes the wq avoid interaction with
+	concurrency management completely and behave as a simple
+	per-CPU execution context provider.  Work items queued on a
+	highpri CPU-intensive wq start execution as soon as resources
+	are available and don't affect execution of other work items.
+
+@max_active:
+
+@max_active determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq.  For example,
+with @max_active of 16, at most 16 work items of the wq can be
+executing at the same time per CPU.
+
+Currently, for a bound wq, the maximum limit for @max_active is 512
+and the default value used when 0 is specified is 256.  For an unbound
+wq, the limit is higher of 512 and 4 * num_possible_cpus().  These
+values are chosen sufficiently high such that they are not the
+limiting factor while providing protection in runaway cases.
+
+The number of active work items of a wq is usually regulated by the
+users of the wq, more specifically, by how many work items the users
+may queue at the same time.  Unless there is a specific need for
+throttling the number of active work items, specifying '0' is
+recommended.
+
+Some users depend on the strict execution ordering of ST wq.  The
+combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
+behavior.  Work items on such wq are always queued to the unbound gcwq
+and only one work item can be active at any given time thus achieving
+the same ordering property as ST wq.
+
+
+5. Example Execution Scenarios
+
+The following example execution scenarios try to illustrate how cmwq
+behave under different configurations.
+
+ Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
+ w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
+ again before finishing.  w1 and w2 burn CPU for 5ms then sleep for
+ 10ms.
+
+Ignoring all other tasks, works and processing overhead, and assuming
+simple FIFO scheduling, the following is one highly simplified version
+of possible sequences of events with the original wq.
+
+ TIME IN MSECS	EVENT
+ 0		w0 starts and burns CPU
+ 5		w0 sleeps
+ 15		w0 wakes up and burns CPU
+ 20		w0 finishes
+ 20		w1 starts and burns CPU
+ 25		w1 sleeps
+ 35		w1 wakes up and finishes
+ 35		w2 starts and burns CPU
+ 40		w2 sleeps
+ 50		w2 wakes up and finishes
+
+And with cmwq with @max_active >= 3,
+
+ TIME IN MSECS	EVENT
+ 0		w0 starts and burns CPU
+ 5		w0 sleeps
+ 5		w1 starts and burns CPU
+ 10		w1 sleeps
+ 10		w2 starts and burns CPU
+ 15		w2 sleeps
+ 15		w0 wakes up and burns CPU
+ 20		w0 finishes
+ 20		w1 wakes up and finishes
+ 25		w2 wakes up and finishes
+
+If @max_active == 2,
+
+ TIME IN MSECS	EVENT
+ 0		w0 starts and burns CPU
+ 5		w0 sleeps
+ 5		w1 starts and burns CPU
+ 10		w1 sleeps
+ 15		w0 wakes up and burns CPU
+ 20		w0 finishes
+ 20		w1 wakes up and finishes
+ 20		w2 starts and burns CPU
+ 25		w2 sleeps
+ 35		w2 wakes up and finishes
+
+Now, let's assume w1 and w2 are queued to a different wq q1 which has
+WQ_HIGHPRI set,
+
+ TIME IN MSECS	EVENT
+ 0		w1 and w2 start and burn CPU
+ 5		w1 sleeps
+ 10		w2 sleeps
+ 10		w0 starts and burns CPU
+ 15		w0 sleeps
+ 15		w1 wakes up and finishes
+ 20		w2 wakes up and finishes
+ 25		w0 wakes up and burns CPU
+ 30		w0 finishes
+
+If q1 has WQ_CPU_INTENSIVE set,
+
+ TIME IN MSECS	EVENT
+ 0		w0 starts and burns CPU
+ 5		w0 sleeps
+ 5		w1 and w2 start and burn CPU
+ 10		w1 sleeps
+ 15		w2 sleeps
+ 15		w0 wakes up and burns CPU
+ 20		w0 finishes
+ 20		w1 wakes up and finishes
+ 25		w2 wakes up and finishes
+
+
+6. Guidelines
+
+* Do not forget to use WQ_RESCUER if a wq may process work items which
+  are used during memory reclaim.  Each wq with WQ_RESCUER set has one
+  rescuer thread reserved for it.  If there is dependency among
+  multiple work items used during memory reclaim, they should be
+  queued to separate wq each with WQ_RESCUER.
+
+* Unless strict ordering is required, there is no need to use ST wq.
+
+* Unless there is a specific need, using 0 for @max_active is
+  recommended.  In most use cases, concurrency level usually stays
+  well under the default limit.
+
+* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
+  flush and work item attributes.  Work items which are not involved
+  in memory reclaim and don't need to be flushed as a part of a group
+  of work items, and don't require any special attribute, can use one
+  of the system wq.  There is no difference in execution
+  characteristics between using a dedicated wq and a system wq.
+
+* Unless work items are expected to consume a huge amount of CPU
+  cycles, using a bound wq is usually beneficial due to the increased
+  level of locality in wq operations and work item execution.
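
As a concrete illustration of the API documented above (not part of the patch): a minimal sketch of a module that allocates a wq and queues a work item on it.  The names (my_wq, my_work_fn) are invented for the example; the calls themselves — alloc_workqueue(), queue_work(), flush_workqueue(), destroy_workqueue() — are the ones described in section 4.

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* Executed asynchronously by a cmwq-managed worker thread. */
		pr_info("example work item ran\n");
	}

	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_init(void)
	{
		/*
		 * WQ_RESCUER is set on the assumption that this work might run
		 * in a memory-reclaim path; @max_active is 1 for the example.
		 * WQ_UNBOUND with @max_active == 1 would instead give the
		 * strict ordering of an old single-threaded wq.
		 */
		my_wq = alloc_workqueue("my_wq", WQ_RESCUER, 1);
		if (!my_wq)
			return -ENOMEM;

		queue_work(my_wq, &my_work);
		return 0;
	}

	static void __exit my_exit(void)
	{
		flush_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");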

+ 7 - 4
MAINTAINERS

@@ -1135,7 +1135,7 @@ ATLX ETHERNET DRIVERS
 M:	Jay Cliburn <jcliburn@gmail.com>
 M:	Chris Snook <chris.snook@gmail.com>
 M:	Jie Yang <jie.yang@atheros.com>
-L:	atl1-devel@lists.sourceforge.net
+L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/atl1
 W:	http://atl1.sourceforge.net
 S:	Maintained
@@ -2657,9 +2657,12 @@ S:	Maintained
 F:	drivers/media/video/gspca/
 
 HARDWARE MONITORING
+M:	Jean Delvare <khali@linux-fr.org>
+M:	Guenter Roeck <guenter.roeck@ericsson.com>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
-S:	Orphan
+T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+S:	Maintained
 F:	Documentation/hwmon/
 F:	drivers/hwmon/
 F:	include/linux/hwmon*.h
@@ -3939,7 +3942,7 @@ F:	drivers/char/isicom.c
 F:	include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:	Felipe Balbi <felipe.balbi@nokia.com>
+M:	Felipe Balbi <balbi@ti.com>
 L:	linux-usb@vger.kernel.org
 T:	git git://gitorious.org/usb/usb.git
 S:	Maintained
@@ -4237,7 +4240,7 @@ S:	Maintained
 F:	drivers/char/hw_random/omap-rng.c
 
 OMAP USB SUPPORT
-M:	Felipe Balbi <felipe.balbi@nokia.com>
+M:	Felipe Balbi <balbi@ti.com>
 M:	David Brownell <dbrownell@users.sourceforge.net>
 L:	linux-usb@vger.kernel.org
 L:	linux-omap@vger.kernel.org

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*

+ 2 - 0
arch/alpha/include/asm/cacheflush.h

@@ -43,6 +43,8 @@ extern void smp_imb(void);
 /* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */
 
 #ifndef CONFIG_SMP
+#include <linux/sched.h>
+
 extern void __load_new_mm_context(struct mm_struct *);
 static inline void
 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,

+ 5 - 1
arch/alpha/include/asm/unistd.h

@@ -449,10 +449,13 @@
 #define __NR_pwritev			491
 #define __NR_rt_tgsigqueueinfo		492
 #define __NR_perf_event_open		493
+#define __NR_fanotify_init		494
+#define __NR_fanotify_mark		495
+#define __NR_prlimit64			496
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS			494
+#define NR_SYSCALLS			497
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
@@ -463,6 +466,7 @@
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 /* "Conditional" syscalls.  What we want is
 

+ 22 - 39
arch/alpha/kernel/entry.S

@@ -317,14 +317,14 @@ ret_from_sys_call:
 	ldq	$0, SP_OFF($sp)
 	and	$0, 8, $0
 	beq	$0, restore_all
-ret_from_reschedule:
+ret_to_user:
 	/* Make sure need_resched and sigpending don't change between
 		sampling and the rti.  */
 	lda	$16, 7
 	call_pal PAL_swpipl
 	ldl	$5, TI_FLAGS($8)
 	and	$5, _TIF_WORK_MASK, $2
-	bne	$5, work_pending
+	bne	$2, work_pending
 restore_all:
 	RESTORE_ALL
 	call_pal PAL_rti
@@ -363,7 +363,7 @@ $ret_success:
  *       $8: current.
  *      $19: The old syscall number, or zero if this is not a return
  *           from a syscall that errored and is possibly restartable.
- *      $20: Error indication.
+ *      $20: The old a3 value
  */
 
 	.align	4
@@ -392,12 +392,18 @@ $work_resched:
 
 $work_notifysig:
 	mov	$sp, $16
-	br	$1, do_switch_stack
+	bsr	$1, do_switch_stack
 	mov	$sp, $17
 	mov	$5, $18
+	mov	$19, $9		/* save old syscall number */
+	mov	$20, $10	/* save old a3 */
+	and	$5, _TIF_SIGPENDING, $2
+	cmovne	$2, 0, $9	/* we don't want double syscall restarts */
 	jsr	$26, do_notify_resume
+	mov	$9, $19
+	mov	$10, $20
 	bsr	$1, undo_switch_stack
-	br	restore_all
+	br	ret_to_user
 .end work_pending
 
 /*
@@ -430,6 +436,7 @@ strace:
 	beq	$1, 1f
 	ldq	$27, 0($2)
 1:	jsr	$26, ($27), sys_gettimeofday
+ret_from_straced:
 	ldgp	$gp, 0($26)
 
 	/* check return.. */
@@ -757,11 +764,15 @@ sys_vfork:
 	.ent	sys_sigreturn
 sys_sigreturn:
 	.prologue 0
+	lda	$9, ret_from_straced
+	cmpult	$26, $9, $9
 	mov	$sp, $17
 	lda	$18, -SWITCH_STACK_SIZE($sp)
 	lda	$sp, -SWITCH_STACK_SIZE($sp)
 	jsr	$26, do_sigreturn
-	br	$1, undo_switch_stack
+	bne	$9, 1f
+	jsr	$26, syscall_trace
+1:	br	$1, undo_switch_stack
 	br	ret_from_sys_call
 .end sys_sigreturn
 
@@ -770,46 +781,18 @@ sys_sigreturn:
 	.ent	sys_rt_sigreturn
 sys_rt_sigreturn:
 	.prologue 0
+	lda	$9, ret_from_straced
+	cmpult	$26, $9, $9
 	mov	$sp, $17
 	lda	$18, -SWITCH_STACK_SIZE($sp)
 	lda	$sp, -SWITCH_STACK_SIZE($sp)
 	jsr	$26, do_rt_sigreturn
-	br	$1, undo_switch_stack
+	bne	$9, 1f
+	jsr	$26, syscall_trace
+1:	br	$1, undo_switch_stack
 	br	ret_from_sys_call
 .end sys_rt_sigreturn
 
-	.align	4
-	.globl	sys_sigsuspend
-	.ent	sys_sigsuspend
-sys_sigsuspend:
-	.prologue 0
-	mov	$sp, $17
-	br	$1, do_switch_stack
-	mov	$sp, $18
-	subq	$sp, 16, $sp
-	stq	$26, 0($sp)
-	jsr	$26, do_sigsuspend
-	ldq	$26, 0($sp)
-	lda	$sp, SWITCH_STACK_SIZE+16($sp)
-	ret
-.end sys_sigsuspend
-
-	.align	4
-	.globl	sys_rt_sigsuspend
-	.ent	sys_rt_sigsuspend
-sys_rt_sigsuspend:
-	.prologue 0
-	mov	$sp, $18
-	br	$1, do_switch_stack
-	mov	$sp, $19
-	subq	$sp, 16, $sp
-	stq	$26, 0($sp)
-	jsr	$26, do_rt_sigsuspend
-	ldq	$26, 0($sp)
-	lda	$sp, SWITCH_STACK_SIZE+16($sp)
-	ret
-.end sys_rt_sigsuspend
-
 	.align	4
 	.globl	sys_sethae
 	.ent	sys_sethae

+ 7 - 5
arch/alpha/kernel/err_ev6.c

@@ -90,11 +90,13 @@ static int
 ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, 
 	       u64 c_stat, u64 c_sts, int print)
 {
-	char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
-			       "MEMORY", "BCACHE", "DCACHE", 
-			       "BCACHE PROBE", "BCACHE PROBE" };
-	char *streamname[] = { "D", "I" };
-	char *bitsname[] = { "SINGLE", "DOUBLE" };
+	static const char * const sourcename[] = {
+		"UNKNOWN", "UNKNOWN", "UNKNOWN",
+		"MEMORY", "BCACHE", "DCACHE",
+		"BCACHE PROBE", "BCACHE PROBE"
+	};
+	static const char * const streamname[] = { "D", "I" };
+	static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
 	int status = MCHK_DISPOSITION_REPORT;
 	int source = -1, stream = -1, bits = -1;
 

+ 17 - 16
arch/alpha/kernel/err_marvel.c

@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 static void
 marvel_print_pox_trans_sum(u64 trans_sum)
 {
-	char *pcix_cmd[] = { "Interrupt Acknowledge",
-			     "Special Cycle",
-			     "I/O Read",
-			     "I/O Write",
-			     "Reserved",
-			     "Reserved / Device ID Message",
-			     "Memory Read",
-			     "Memory Write",
-			     "Reserved / Alias to Memory Read Block",
-			     "Reserved / Alias to Memory Write Block",
-			     "Configuration Read",
-			     "Configuration Write",
-			     "Memory Read Multiple / Split Completion",
-			     "Dual Address Cycle",
-			     "Memory Read Line / Memory Read Block",
-			     "Memory Write and Invalidate / Memory Write Block"
+	static const char * const pcix_cmd[] = {
+		"Interrupt Acknowledge",
+		"Special Cycle",
+		"I/O Read",
+		"I/O Write",
+		"Reserved",
+		"Reserved / Device ID Message",
+		"Memory Read",
+		"Memory Write",
+		"Reserved / Alias to Memory Read Block",
+		"Reserved / Alias to Memory Write Block",
+		"Configuration Read",
+		"Configuration Write",
+		"Memory Read Multiple / Split Completion",
+		"Dual Address Cycle",
+		"Memory Read Line / Memory Read Block",
+		"Memory Write and Invalidate / Memory Write Block"
 	};
 
 #define IO7__POX_TRANSUM__PCI_ADDR__S		(0)

+ 20 - 15
arch/alpha/kernel/err_titan.c

@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
 	int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-	char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
-	char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+	static const char * const serror_src[] = {
+		"GPCI", "APCI", "AGP HP", "AGP LP"
+	};
+	static const char * const serror_cmd[] = {
+		"DMA Read", "DMA RMW", "SGTE Read", "Reserved"
+	};
 #endif /* CONFIG_VERBOSE_MCHECK */
 
 #define TITAN__PCHIP_SERROR__LOST_UECC	(1UL << 0)
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
 	int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-	char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
-			       "I/O Read",	       	"I/O Write",
-			       "Reserved",	       	"Reserved",
-			       "Memory Read",		"Memory Write",
-			       "Reserved",		"Reserved",
-			       "Configuration Read",	"Configuration Write",
-			       "Memory Read Multiple",	"Dual Address Cycle",
-			       "Memory Read Line","Memory Write and Invalidate"
+	static const char * const perror_cmd[] = {
+		"Interrupt Acknowledge", "Special Cycle",
+		"I/O Read",		"I/O Write",
+		"Reserved",		"Reserved",
+		"Memory Read",		"Memory Write",
+		"Reserved",		"Reserved",
+		"Configuration Read",	"Configuration Write",
+		"Memory Read Multiple",	"Dual Address Cycle",
+		"Memory Read Line",	"Memory Write and Invalidate"
 	};
 #endif /* CONFIG_VERBOSE_MCHECK */
 
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
 	int cmd, len;
 	unsigned long addr;
 
-	char *agperror_cmd[] = { "Read (low-priority)",	"Read (high-priority)",
-				 "Write (low-priority)",
-				 "Write (high-priority)",
-				 "Reserved",		"Reserved",
-				 "Flush",		"Fence"
+	static const char * const agperror_cmd[] = {
+		"Read (low-priority)",	"Read (high-priority)",
+		"Write (low-priority)",	"Write (high-priority)",
+		"Reserved",		"Reserved",
+		"Flush",		"Fence"
 	};
 #endif /* CONFIG_VERBOSE_MCHECK */
 

+ 2 - 7
arch/alpha/kernel/osf_sys.c

@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
 {
 	struct mm_struct *mm;
 
-	lock_kernel();
 	mm = current->mm;
 	mm->end_code = bss_start + bss_len;
 	mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
 	printk("set_program_attributes(%lx %lx %lx %lx)\n",
 		text_start, text_len, bss_start, bss_len);
 #endif
-	unlock_kernel();
 	return 0;
 }
 
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
 	long error;
 	int __user *min_buf_size_ptr;
 
-	lock_kernel();
 	switch (code) {
 	case PL_SET:
 		if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
 		error = -EOPNOTSUPP;
 		break;
 	};
-	unlock_kernel();
 	return error;
 }
 
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
 
 SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 {
-	char *sysinfo_table[] = {
+	const char *sysinfo_table[] = {
 		utsname()->sysname,
 		utsname()->nodename,
 		utsname()->release,
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 		"dummy",	/* secure RPC domain */
 	};
 	unsigned long offset;
-	char *res;
+	const char *res;
 	long len, err = -EINVAL;
 
 	offset = command-1;

+ 1 - 1
arch/alpha/kernel/pci-sysfs.c

@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
 {
 	struct pci_dev *pdev = to_pci_dev(container_of(kobj,
 						       struct device, kobj));
-	struct resource *res = (struct resource *)attr->private;
+	struct resource *res = attr->private;
 	enum pci_mmap_state mmap_type;
 	struct pci_bus_region bar;
 	int i;

+ 3 - 38
arch/alpha/kernel/signal.c

@@ -144,8 +144,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
 {
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +153,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	/* Indicate EINTR on return from any possible signal handler,
-	   which will not come back through here, but via sigreturn.  */
-	regs->r0 = EINTR;
-	regs->r19 = 1;
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-	return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
-		 struct pt_regs *regs, struct switch_stack *sw)
-{
-	sigset_t set;
-
-	/* XXX: Don't preclude handling different sized sigset_t's.  */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-	if (copy_from_user(&set, uset, sizeof(set)))
-		return -EFAULT;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* Indicate EINTR on return from any possible signal handler,
-	   which will not come back through here, but via sigreturn.  */
-	regs->r0 = EINTR;
-	regs->r19 = 1;
-
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
 	set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +203,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	unsigned long usp;
 	long i, err = __get_user(regs->pc, &sc->sc_pc);
 
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	sw->r26 = (unsigned long) ret_from_sys_call;
 
 	err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +557,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
 		regs->pc -= 4;
 		break;
 	case ERESTART_RESTARTBLOCK:
-		current_thread_info()->restart_block.fn = do_no_restart_syscall;
 		regs->r0 = EINTR;
 		break;
 	}

+ 1 - 1
arch/alpha/kernel/srm_env.c

@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
 	srm_env_t	*entry;
 	char		*page;
 
-	entry = (srm_env_t *)m->private;
+	entry = m->private;
 	page = (char *)__get_free_page(GFP_USER);
 	if (!page)
 		return -ENOMEM;

+ 3 - 0
arch/alpha/kernel/systbls.S

@@ -512,6 +512,9 @@ sys_call_table:
 	.quad sys_pwritev
 	.quad sys_rt_tgsigqueueinfo
 	.quad sys_perf_event_open
+	.quad sys_fanotify_init
+	.quad sys_fanotify_mark				/* 495 */
+	.quad sys_prlimit64
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object

+ 5 - 5
arch/alpha/kernel/time.c

@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-#ifndef CONFIG_SMP
-	while (nticks--)
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-
 	if (test_perf_event_pending()) {
 		clear_perf_event_pending();
 		perf_event_do_pending();
 	}
 
+#ifndef CONFIG_SMP
+	while (nticks--)
+		update_process_times(user_mode(get_irq_regs()));
+#endif
+
 	return IRQ_HANDLED;
 }
 

+ 0 - 3
arch/alpha/kernel/traps.c

@@ -13,7 +13,6 @@
 #include <linux/sched.h>
 #include <linux/tty.h>
 #include <linux/delay.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		return;
 	}
 
-	lock_kernel();
 	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
 		pc, va, opcode, reg);
 	do_exit(SIGSEGV);
@@ -646,7 +644,6 @@ got_exception:
 	 * Yikes!  No one to forward the exception to.
 	 * Since the registers are in a weird format, dump them ourselves.
  	 */
-	lock_kernel();
 
 	printk("%s(%d): unhandled unaligned exception\n",
 	       current->comm, task_pid_nr(current));

+ 2 - 0
arch/arm/kernel/entry-common.S

@@ -418,11 +418,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 

+ 2 - 1
arch/arm/mach-s3c64xx/dev-spi.c

@@ -18,10 +18,11 @@
 #include <mach/map.h>
 #include <mach/gpio-bank-c.h>
 #include <mach/spi-clocks.h>
+#include <mach/irqs.h>
 
 #include <plat/s3c64xx-spi.h>
 #include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
+#include <plat/devs.h>
 
 static char *spi_src_clks[] = {
 	[S3C64XX_SPI_SRCCLK_PCLK] = "pclk",

+ 52 - 52
arch/arm/mach-s3c64xx/mach-real6410.c

@@ -30,73 +30,73 @@
 #include <plat/devs.h>
 #include <plat/regs-serial.h>
 
-#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
-#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
-#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
 static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
 	[0] = {
-		.hwport	     = 0,
-		.flags	     = 0,
-		.ucon	     = UCON,
-		.ulcon	     = ULCON,
-		.ufcon	     = UFCON,
+		.hwport	= 0,
+		.flags	= 0,
+		.ucon	= UCON,
+		.ulcon	= ULCON,
+		.ufcon	= UFCON,
 	},
 	[1] = {
-		.hwport	     = 1,
-		.flags	     = 0,
-		.ucon	     = UCON,
-		.ulcon	     = ULCON,
-		.ufcon	     = UFCON,
+		.hwport	= 1,
+		.flags	= 0,
+		.ucon	= UCON,
+		.ulcon	= ULCON,
+		.ufcon	= UFCON,
 	},
 	[2] = {
-		.hwport	     = 2,
-		.flags	     = 0,
-		.ucon	     = UCON,
-		.ulcon	     = ULCON,
-		.ufcon	     = UFCON,
+		.hwport	= 2,
+		.flags	= 0,
+		.ucon	= UCON,
+		.ulcon	= ULCON,
+		.ufcon	= UFCON,
 	},
 	[3] = {
-		.hwport	     = 3,
-		.flags	     = 0,
-		.ucon	     = UCON,
-		.ulcon	     = ULCON,
-		.ufcon	     = UFCON,
+		.hwport	= 3,
+		.flags	= 0,
+		.ucon	= UCON,
+		.ulcon	= ULCON,
+		.ufcon	= UFCON,
 	},
 };
 
 /* DM9000AEP 10/100 ethernet controller */
 
 static struct resource real6410_dm9k_resource[] = {
-        [0] = {
-                .start = S3C64XX_PA_XM0CSN1,
-                .end   = S3C64XX_PA_XM0CSN1 + 1,
-                .flags = IORESOURCE_MEM
-        },
-        [1] = {
-                .start = S3C64XX_PA_XM0CSN1 + 4,
-                .end   = S3C64XX_PA_XM0CSN1 + 5,
-                .flags = IORESOURCE_MEM
-        },
-        [2] = {
-                .start = S3C_EINT(7),
-                .end   = S3C_EINT(7),
-                .flags = IORESOURCE_IRQ,
-        }
+	[0] = {
+		.start	= S3C64XX_PA_XM0CSN1,
+		.end	= S3C64XX_PA_XM0CSN1 + 1,
+		.flags	= IORESOURCE_MEM
+	},
+	[1] = {
+		.start	= S3C64XX_PA_XM0CSN1 + 4,
+		.end	= S3C64XX_PA_XM0CSN1 + 5,
+		.flags	= IORESOURCE_MEM
+	},
+	[2] = {
+		.start	= S3C_EINT(7),
+		.end	= S3C_EINT(7),
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+	}
 };
 
 static struct dm9000_plat_data real6410_dm9k_pdata = {
-        .flags          = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+	.flags		= (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
 };
 
 static struct platform_device real6410_device_eth = {
-        .name           = "dm9000",
-        .id             = -1,
-        .num_resources  = ARRAY_SIZE(real6410_dm9k_resource),
-        .resource       = real6410_dm9k_resource,
-        .dev            = {
-                .platform_data  = &real6410_dm9k_pdata,
-        },
+	.name		= "dm9000",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(real6410_dm9k_resource),
+	.resource	= real6410_dm9k_resource,
+	.dev		= {
+		.platform_data	= &real6410_dm9k_pdata,
+	},
 };
 
 static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
 	/* set timing for nCS1 suitable for ethernet chip */
 
 	__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
-			(6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
-			(4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
-			(1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
-			(13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
-			(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
-			(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+		(6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+		(4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+		(1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+		(13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+		(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+		(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
 
 	platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
 }

+ 19 - 1
arch/arm/mach-s5pv210/clock.c

@@ -280,6 +280,24 @@ static struct clk init_clocks_disable[] = {
 		.parent		= &clk_hclk_dsys.clk,
 		.enable		= s5pv210_clk_ip0_ctrl,
 		.ctrlbit	= (1<<29),
+	}, {
+		.name		= "fimc",
+		.id		= 0,
+		.parent		= &clk_hclk_dsys.clk,
+		.enable		= s5pv210_clk_ip0_ctrl,
+		.ctrlbit	= (1 << 24),
+	}, {
+		.name		= "fimc",
+		.id		= 1,
+		.parent		= &clk_hclk_dsys.clk,
+		.enable		= s5pv210_clk_ip0_ctrl,
+		.ctrlbit	= (1 << 25),
+	}, {
+		.name		= "fimc",
+		.id		= 2,
+		.parent		= &clk_hclk_dsys.clk,
+		.enable		= s5pv210_clk_ip0_ctrl,
+		.ctrlbit	= (1 << 26),
 	}, {
 		.name		= "otg",
 		.id		= -1,
@@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = {
 		.id		= 1,
 		.parent		= &clk_pclk_psys.clk,
 		.enable		= s5pv210_clk_ip3_ctrl,
-		.ctrlbit	= (1<<8),
+		.ctrlbit	= (1 << 10),
 	}, {
 		.name		= "i2c",
 		.id		= 2,

+ 1 - 1
arch/arm/mach-s5pv210/cpu.c

@@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
 	{
 		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
 		.pfn		= __phys_to_pfn(S5PV210_PA_SYSTIMER),
-		.length		= SZ_1M,
+		.length		= SZ_4K,
 		.type		= MT_DEVICE,
 	}, {
 		.virtual	= (unsigned long)VA_VIC2,

+ 8 - 1
arch/arm/plat-s5p/dev-fimc0.c

@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc0_resource[] = {
 	[0] = {
 		.start	= S5P_PA_FIMC0,
-		.end	= S5P_PA_FIMC0 + SZ_1M - 1,
+		.end	= S5P_PA_FIMC0 + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
 	},
 };
 
+static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc0 = {
 	.name		= "s5p-fimc",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(s5p_fimc0_resource),
 	.resource	= s5p_fimc0_resource,
+	.dev		= {
+		.dma_mask		= &s5p_fimc0_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
 };

+ 8 - 1
arch/arm/plat-s5p/dev-fimc1.c

@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc1_resource[] = {
 	[0] = {
 		.start	= S5P_PA_FIMC1,
-		.end	= S5P_PA_FIMC1 + SZ_1M - 1,
+		.end	= S5P_PA_FIMC1 + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
 	},
 };
 
+static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc1 = {
 	.name		= "s5p-fimc",
 	.id		= 1,
 	.num_resources	= ARRAY_SIZE(s5p_fimc1_resource),
 	.resource	= s5p_fimc1_resource,
+	.dev		= {
+		.dma_mask		= &s5p_fimc1_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
 };

+ 8 - 1
arch/arm/plat-s5p/dev-fimc2.c

@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc2_resource[] = {
 	[0] = {
 		.start	= S5P_PA_FIMC2,
-		.end	= S5P_PA_FIMC2 + SZ_1M - 1,
+		.end	= S5P_PA_FIMC2 + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
 	},
 };
 
+static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc2 = {
 	.name		= "s5p-fimc",
 	.id		= 2,
 	.num_resources	= ARRAY_SIZE(s5p_fimc2_resource),
 	.resource	= s5p_fimc2_resource,
+	.dev		= {
+		.dma_mask		= &s5p_fimc2_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
 };

+ 4 - 3
arch/arm/plat-samsung/gpio-config.c

@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
 	if (!chip)
 		return -EINVAL;
 
-	off = chip->chip.base - pin;
+	off = pin - chip->chip.base;
 	shift = off * 2;
 	reg = chip->base + 0x0C;
 
 	drvstr = __raw_readl(reg);
-	drvstr = 0xffff & (0x3 << shift);
 	drvstr = drvstr >> shift;
+	drvstr &= 0x3;
 
 	return (__force s5p_gpio_drvstr_t)drvstr;
 }
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
 	if (!chip)
 		return -EINVAL;
 
-	off = chip->chip.base - pin;
+	off = pin - chip->chip.base;
 	shift = off * 2;
 	reg = chip->base + 0x0C;
 
 	tmp = __raw_readl(reg);
+	tmp &= ~(0x3 << shift);
 	tmp |= drvstr << shift;
 
 	__raw_writel(tmp, reg);

+ 5 - 5
arch/arm/plat-samsung/include/plat/gpio-cfg.h

@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
 /* Define values for the drvstr available for each gpio pin.
  *
  * These values control the value of the output signal driver strength,
- * configurable on most pins on the S5C series.
+ * configurable on most pins on the S5P series.
  */
-#define S5P_GPIO_DRVSTR_LV1	((__force s5p_gpio_drvstr_t)0x00)
-#define S5P_GPIO_DRVSTR_LV2	((__force s5p_gpio_drvstr_t)0x01)
-#define S5P_GPIO_DRVSTR_LV3	((__force s5p_gpio_drvstr_t)0x10)
-#define S5P_GPIO_DRVSTR_LV4	((__force s5p_gpio_drvstr_t)0x11)
+#define S5P_GPIO_DRVSTR_LV1	((__force s5p_gpio_drvstr_t)0x0)
+#define S5P_GPIO_DRVSTR_LV2	((__force s5p_gpio_drvstr_t)0x2)
+#define S5P_GPIO_DRVSTR_LV3	((__force s5p_gpio_drvstr_t)0x1)
+#define S5P_GPIO_DRVSTR_LV4	((__force s5p_gpio_drvstr_t)0x3)
 
 /**
  * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin

+ 31 - 20
arch/frv/kernel/signal.c

@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
 	struct user_context *user = current->thread.user;
 	unsigned long tbr, psr;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	tbr = user->i.tbr;
 	psr = user->i.psr;
 	if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
 	struct sigframe __user *frame;
 	int rsig;
 
+	set_fs(USER_DS);
+
 	frame = get_sigframe(ka, sizeof(*frame));
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
 				   (unsigned long) (frame->retcode + 2));
 	}
 
-	/* set up registers for signal handler */
-	__frame->sp   = (unsigned long) frame;
-	__frame->lr   = (unsigned long) &frame->retcode;
-	__frame->gr8  = sig;
-
+	/* Set up registers for the signal handler */
 	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-		__get_user(__frame->pc, &funcptr->text);
-		__get_user(__frame->gr15, &funcptr->GOT);
+		struct fdpic_func_descriptor desc;
+		if (copy_from_user(&desc, funcptr, sizeof(desc)))
+			goto give_sigsegv;
+		__frame->pc = desc.text;
+		__frame->gr15 = desc.GOT;
 	} else {
 		__frame->pc   = (unsigned long) ka->sa.sa_handler;
 		__frame->gr15 = 0;
 	}
 
-	set_fs(USER_DS);
+	__frame->sp   = (unsigned long) frame;
+	__frame->lr   = (unsigned long) &frame->retcode;
+	__frame->gr8  = sig;
 
 	/* the tracer may want to single-step inside the handler */
 	if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
 	return 0;
 
 give_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 	return -EFAULT;
 
 } /* end setup_frame() */
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe __user *frame;
 	int rsig;
 
+	set_fs(USER_DS);
+
 	frame = get_sigframe(ka, sizeof(*frame));
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	}
 
 	/* Set up registers for signal handler */
-	__frame->sp  = (unsigned long) frame;
-	__frame->lr   = (unsigned long) &frame->retcode;
-	__frame->gr8 = sig;
-	__frame->gr9 = (unsigned long) &frame->info;
-
 	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-		__get_user(__frame->pc, &funcptr->text);
-		__get_user(__frame->gr15, &funcptr->GOT);
+		struct fdpic_func_descriptor desc;
+		if (copy_from_user(&desc, funcptr, sizeof(desc)))
+			goto give_sigsegv;
+		__frame->pc = desc.text;
+		__frame->gr15 = desc.GOT;
 	} else {
 		__frame->pc   = (unsigned long) ka->sa.sa_handler;
 		__frame->gr15 = 0;
 	}
 
-	set_fs(USER_DS);
+	__frame->sp  = (unsigned long) frame;
+	__frame->lr  = (unsigned long) &frame->retcode;
+	__frame->gr8 = sig;
+	__frame->gr9 = (unsigned long) &frame->info;
 
 	/* the tracer may want to single-step inside the handler */
 	if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	return 0;
 
 give_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sigsegv(sig, current);
 	return -EFAULT;
 
 } /* end setup_rt_frame() */
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 	int ret;
 
 	/* Are we from a system call? */
-	if (in_syscall(__frame)) {
+	if (__frame->syscallno != -1) {
 		/* If so, check system call restarting.. */
 		switch (__frame->gr8) {
 		case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 			__frame->gr8 = __frame->orig_gr8;
 			__frame->pc -= 4;
 		}
+		__frame->syscallno = -1;
 	}
 
 	/* Set up the stack frame */
@@ -538,10 +548,11 @@ no_signal:
 			break;
 
 		case -ERESTART_RESTARTBLOCK:
-			__frame->gr8 = __NR_restart_syscall;
+			__frame->gr7 = __NR_restart_syscall;
 			__frame->pc -= 4;
 			break;
 		}
+		__frame->syscallno = -1;
 	}
 
 	/* if there's no signal to deliver, we just put the saved sigmask

+ 1 - 1
arch/ia64/include/asm/compat.h

@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
 }
 
 static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);

+ 11 - 31
arch/ia64/kernel/fsys.S

@@ -420,34 +420,31 @@ EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
 	;;
 
 	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
-	mov ar.ccv=0
 	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP
+	mov r8=EINVAL			// default to EINVAL
 
 #ifdef CONFIG_SMP
 	// __ticket_spin_trylock(r31)
 	ld4 r17=[r31]
-	mov r8=EINVAL			// default to EINVAL
-	;;
-	extr r9=r17,17,15
 	;;
-	xor r18=r17,r9
+	mov.m ar.ccv=r17
+	extr.u r9=r17,17,15
 	adds r19=1,r17
+	extr.u r18=r17,0,15
 	;;
-	extr.u r18=r18,0,15
+	cmp.eq p6,p7=r9,r18
 	;;
-	cmp.eq p0,p7=0,r18
+(p6)	cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6)	dep.z r20=r19,1,15		// next serving ticket for unlock
 (p7)	br.cond.spnt.many .lock_contention
-	mov.m ar.ccv=r17
-	;;
-	cmpxchg4.acq r9=[r31],r19,ar.ccv
 	;;
 	cmp4.eq p0,p7=r9,r17
+	adds r31=2,r31
 (p7)	br.cond.spnt.many .lock_contention
 	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
 	;;
 #else
 	ld8 r3=[r2]			// re-read current->blocked now that we hold the lock
-	mov r8=EINVAL			// default to EINVAL
 #endif
 	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
 	add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -503,16 +500,8 @@ EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
 
 #ifdef CONFIG_SMP
 	// __ticket_spin_unlock(r31)
-	adds r31=2,r31
-	;;
-	ld2.bias r2=[r31]
-	mov r3=65534
-	;;
-	adds r2=2,r2
-	;;
-	and r3=r3,r2
-	;;
-	st2.rel [r31]=r3
+	st2.rel [r31]=r20
+	mov r20=0					// i must not leak kernel bits...
 #endif
 	SSM_PSR_I(p0, p9, r31)
 	;;
@@ -535,16 +524,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 .sig_pending:
 #ifdef CONFIG_SMP
 	// __ticket_spin_unlock(r31)
-	adds r31=2,r31
-	;;
-	ld2.bias r2=[r31]
-	mov r3=65534
-	;;
-	adds r2=2,r2
-	;;
-	and r3=r3,r2
-	;;
-	st2.rel [r31]=r3
+	st2.rel [r31]=r20			// release the lock
 #endif
 	SSM_PSR_I(p0, p9, r17)
 	;;

+ 1 - 1
arch/mips/include/asm/compat.h

@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = (struct pt_regs *)
 		((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;

+ 11 - 11
arch/mn10300/kernel/mn10300-serial.c

@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
 	._intr		= &SC0ICR,
 	._rxb		= &SC0RXB,
 	._txb		= &SC0TXB,
-	.rx_name	= "ttySM0/Rx",
-	.tx_name	= "ttySM0/Tx",
+	.rx_name	= "ttySM0:Rx",
+	.tx_name	= "ttySM0:Tx",
 #ifdef CONFIG_MN10300_TTYSM0_TIMER8
-	.tm_name	= "ttySM0/Timer8",
+	.tm_name	= "ttySM0:Timer8",
 	._tmxmd		= &TM8MD,
 	._tmxbr		= &TM8BR,
 	._tmicr		= &TM8ICR,
 	.tm_irq		= TM8IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM0_TIMER2 */
-	.tm_name	= "ttySM0/Timer2",
+	.tm_name	= "ttySM0:Timer2",
 	._tmxmd		= &TM2MD,
 	._tmxbr		= (volatile u16 *) &TM2BR,
 	._tmicr		= &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
 	._intr		= &SC1ICR,
 	._rxb		= &SC1RXB,
 	._txb		= &SC1TXB,
-	.rx_name	= "ttySM1/Rx",
-	.tx_name	= "ttySM1/Tx",
+	.rx_name	= "ttySM1:Rx",
+	.tx_name	= "ttySM1:Tx",
 #ifdef CONFIG_MN10300_TTYSM1_TIMER9
-	.tm_name	= "ttySM1/Timer9",
+	.tm_name	= "ttySM1:Timer9",
 	._tmxmd		= &TM9MD,
 	._tmxbr		= &TM9BR,
 	._tmicr		= &TM9ICR,
 	.tm_irq		= TM9IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM1_TIMER3 */
-	.tm_name	= "ttySM1/Timer3",
+	.tm_name	= "ttySM1:Timer3",
 	._tmxmd		= &TM3MD,
 	._tmxbr		= (volatile u16 *) &TM3BR,
 	._tmicr		= &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
 	.uart.lock	=
 	__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
 	.name		= "ttySM2",
-	.rx_name	= "ttySM2/Rx",
-	.tx_name	= "ttySM2/Tx",
-	.tm_name	= "ttySM2/Timer10",
+	.rx_name	= "ttySM2:Rx",
+	.tx_name	= "ttySM2:Tx",
+	.tm_name	= "ttySM2:Timer10",
 	._iobase	= &SC2CTR,
 	._control	= &SC2CTR,
 	._status	= &SC2STR,

+ 1 - 1
arch/parisc/include/asm/compat.h

@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = &current->thread.regs;
 	return (void __user *)regs->gr[30];

+ 1 - 1
arch/powerpc/include/asm/compat.h

@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current->thread.regs;
 	unsigned long usp = regs->gpr[1];

+ 1 - 1
arch/s390/include/asm/compat.h

@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
 
 #endif
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	unsigned long stack;
 

+ 1 - 1
arch/sparc/include/asm/compat.h

@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current_thread_info()->kregs;
 	unsigned long usp = regs->u_regs[UREG_I6];

+ 3 - 0
arch/tile/include/arch/chip_tile64.h

@@ -150,6 +150,9 @@
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 0
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 

+ 3 - 0
arch/tile/include/arch/chip_tilepro.h

@@ -150,6 +150,9 @@
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 1
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 

+ 4 - 3
arch/tile/include/asm/compat.h

@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
 	return (long)(int)(long __force)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
 struct compat_sigaction;
 struct compat_siginfo;
 struct compat_sigaltstack;
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-		       compat_uptr_t __user *envp);
+long compat_sys_execve(const char __user *path,
+		       const compat_uptr_t __user *argv,
+		       const compat_uptr_t __user *envp);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
 			     struct compat_sigaction __user *oact,
 			     size_t sigsetsize);

+ 4 - 4
arch/tile/include/asm/io.h

@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void *memcpy_fromio(void *dst, void *src, int len)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+				 size_t len)
 {
 	int x;
 	BUG_ON((unsigned long)src & 0x3);
 	for (x = 0; x < len; x += 4)
 		*(u32 *)(dst + x) = readl(src + x);
-	return dst;
 }
 
-static inline void *memcpy_toio(void *dst, void *src, int len)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+				size_t len)
 {
 	int x;
 	BUG_ON((unsigned long)dst & 0x3);
 	for (x = 0; x < len; x += 4)
 		writel(*(u32 *)(src + x), dst + x);
-	return dst;
 }
 
 /*
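
With the prototypes above now matching the usual kernel signatures for these helpers, a caller copies between RAM and a word-aligned MMIO window roughly as follows (illustrative sketch only; example_copy_regs and ioaddr are made-up names, not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

static void example_copy_regs(void __iomem *ioaddr)
{
	u32 buf[4];

	memcpy_fromio(buf, ioaddr, sizeof(buf));	/* MMIO -> RAM */
	buf[0] |= 0x1;					/* modify a field */
	memcpy_toio(ioaddr, buf, sizeof(buf));		/* RAM -> MMIO */
}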

+ 12 - 0
arch/tile/include/asm/processor.h

@@ -103,6 +103,18 @@ struct thread_struct {
 	/* Any other miscellaneous processor state bits */
 	unsigned long proc_status;
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+	/* Interrupt base for PL0 interrupts */
+	unsigned long interrupt_vector_base;
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+	/* Tile cache retry fifo high-water mark */
+	unsigned long tile_rtf_hwm;
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+	/* Data stream prefetch control */
+	unsigned long dstream_pf;
+#endif
 #ifdef CONFIG_HARDWALL
 	/* Is this task tied to an activated hardwall? */
 	struct hardwall_info *hardwall;

+ 6 - 9
arch/tile/include/asm/ptrace.h

@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t;
 
 /*
  * This struct defines the way the registers are stored on the stack during a
- * system call/exception.  It should be a multiple of 8 bytes to preserve
- * normal stack alignment rules.
- *
- * Must track <sys/ucontext.h> and <sys/procfs.h>
+ * system call or exception.  "struct sigcontext" has the same shape.
  */
 struct pt_regs {
 	/* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@ struct pt_regs {
 
 #endif /* __ASSEMBLY__ */
 
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
-
 #define PTRACE_GETREGS		12
 #define PTRACE_SETREGS		13
 #define PTRACE_GETFPREGS	14
@@ -101,6 +93,11 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
+
 #ifndef __ASSEMBLY__
 
 #define instruction_pointer(regs) ((regs)->pc)

+ 13 - 5
arch/tile/include/asm/sigcontext.h

@@ -15,13 +15,21 @@
 #ifndef _ASM_TILE_SIGCONTEXT_H
 #define _ASM_TILE_SIGCONTEXT_H
 
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
-
-/* Must track <sys/ucontext.h> */
+#include <arch/abi.h>
 
+/*
+ * struct sigcontext has the same shape as struct pt_regs,
+ * but is simplified since we know the fault is from userspace.
+ */
 struct sigcontext {
-	struct pt_regs regs;
+	uint_reg_t gregs[53];	/* General-purpose registers.  */
+	uint_reg_t tp;		/* Aliases gregs[TREG_TP].  */
+	uint_reg_t sp;		/* Aliases gregs[TREG_SP].  */
+	uint_reg_t lr;		/* Aliases gregs[TREG_LR].  */
+	uint_reg_t pc;		/* Program counter.  */
+	uint_reg_t ics;		/* In Interrupt Critical Section?  */
+	uint_reg_t faultnum;	/* Fault number.  */
+	uint_reg_t pad[5];
 };
 
 #endif /* _ASM_TILE_SIGCONTEXT_H */

+ 1 - 0
arch/tile/include/asm/signal.h

@@ -24,6 +24,7 @@
 #include <asm-generic/signal.h>
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+struct pt_regs;
 int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
 void do_signal(struct pt_regs *regs);

+ 13 - 8
arch/tile/include/asm/syscalls.h

@@ -62,10 +62,12 @@ long sys_fork(void);
 long _sys_fork(struct pt_regs *regs);
 long sys_vfork(void);
 long _sys_vfork(struct pt_regs *regs);
-long sys_execve(char __user *filename, char __user * __user *argv,
-		char __user * __user *envp);
-long _sys_execve(char __user *filename, char __user * __user *argv,
-		 char __user * __user *envp, struct pt_regs *regs);
+long sys_execve(const char __user *filename,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp);
+long _sys_execve(const char __user *filename,
+		 const char __user *const __user *argv,
+		 const char __user *const __user *envp, struct pt_regs *regs);
 
 /* kernel/signal.c */
 long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
 #endif
 
 #ifdef CONFIG_COMPAT
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-		       compat_uptr_t __user *envp);
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-			compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_execve(const char __user *path,
+		       const compat_uptr_t __user *argv,
+		       const compat_uptr_t __user *envp);
+long _compat_sys_execve(const char __user *path,
+			const compat_uptr_t __user *argv,
+			const compat_uptr_t __user *envp,
+			struct pt_regs *regs);
 long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 			    struct compat_sigaltstack __user *uoss_ptr);
 long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,

+ 20 - 10
arch/tile/kernel/process.c

@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
 	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
+#endif
 }
 
 static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
 	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
+#endif
 #if CHIP_HAS_TILE_RTF_HWM()
-	/*
-	 * Clear this whenever we switch back to a process in case
-	 * the previous process was monkeying with it.  Even if enabled
-	 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
-	 * performance hint, so isn't worth a full save/restore.
-	 */
-	__insn_mtspr(SPR_TILE_RTF_HWM, 0);
+	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
 #endif
 }
 
@@ -561,8 +570,9 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-			compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(const char __user *path,
+			const compat_uptr_t __user *argv,
+			const compat_uptr_t __user *envp, struct pt_regs *regs)
 {
 	long error;
 	char *filename;
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs)
 	       regs->regs[51], regs->regs[52], regs->tp);
 	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
-	for (i = 0; i < 52; i += 3)
+	for (i = 0; i < 52; i += 4)
 		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
 		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+1, regs->regs[i+1],

+ 15 - 12
arch/tile/kernel/signal.c

@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs,
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+	/*
+	 * Enforce that sigcontext is like pt_regs, and doesn't mess
+	 * up our stack alignment rules.
+	 */
+	BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
+	BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
+
 	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-		err |= __get_user(((long *)regs)[i],
-				  &((long __user *)(&sc->regs))[i]);
+		err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
-	err |= __get_user(*pr0, &sc->regs.regs[0]);
+	err |= __get_user(*pr0, &sc->gregs[0]);
 	return err;
 }
 
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
 	int i, err = 0;
 
 	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-		err |= __put_user(((long *)regs)[i],
-				  &((long __user *)(&sc->regs))[i]);
+		err |= __put_user(regs->regs[i], &sc->gregs[i]);
 
 	return err;
 }
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * Set up registers for signal handler.
 	 * Registers that we don't modify keep the value they had from
 	 * user-space at the time we took the signal.
+	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
 	regs->pc = (unsigned long) ka->sa.sa_handler;
 	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
 	regs->sp = (unsigned long) frame;
 	regs->lr = restorer;
 	regs->regs[0] = (unsigned long) usig;
-
-	if (ka->sa.sa_flags & SA_SIGINFO) {
-		/* Need extra arguments, so mark to restore caller-saves. */
-		regs->regs[1] = (unsigned long) &frame->info;
-		regs->regs[2] = (unsigned long) &frame->uc;
-		regs->flags |= PT_FLAGS_CALLER_SAVES;
-	}
+	regs->regs[1] = (unsigned long) &frame->info;
+	regs->regs[2] = (unsigned long) &frame->uc;
+	regs->flags |= PT_FLAGS_CALLER_SAVES;
 
 	/*
 	 * Notify any tracer that was single-stepping it.

+ 1 - 1
arch/tile/kernel/stack.c

@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
 			pr_err("  <received signal %d>\n",
 			       frame->info.si_signo);
 		}
-		return &frame->uc.uc_mcontext.regs;
+		return (struct pt_regs *)&frame->uc.uc_mcontext;
 	}
 	return NULL;
 }

+ 1 - 1
arch/x86/Makefile

@@ -74,7 +74,7 @@ endif
 
 ifdef CONFIG_CC_STACKPROTECTOR
 	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
                 stackp-y := -fstack-protector
                 KBUILD_CFLAGS += $(stackp-y)
         else

+ 14 - 8
arch/x86/ia32/ia32entry.S

@@ -50,7 +50,12 @@
 	/*
 	 * Reload arg registers from stack in case ptrace changed them.
 	 * We don't reload %eax because syscall_trace_enter() returned
-	 * the value it wants us to use in the table lookup.
+	 * the %rax value we should see.  Instead, we just truncate that
+	 * value to 32 bits again as we did on entry from user mode.
+	 * If it's a new value set by user_regset during entry tracing,
+	 * this matches the normal truncation of the user-mode value.
+	 * If it's -1 to make us punt the syscall, then (u32)-1 is still
+	 * an appropriately invalid value.
 	 */
 	.macro LOAD_ARGS32 offset, _r9=0
 	.if \_r9
@@ -60,6 +65,7 @@
 	movl \offset+48(%rsp),%edx
 	movl \offset+56(%rsp),%esi
 	movl \offset+64(%rsp),%edi
+	movl %eax,%eax			/* zero extension */
 	.endm
 	
 	.macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
 	testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
-	cmpl	$(IA32_NR_syscalls-1),%eax
+	cmpq	$(IA32_NR_syscalls-1),%rax
 	ja	ia32_badsys
 sysenter_do_call:
 	IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
 	movl $AUDIT_ARCH_I386,%edi	/* 1st arg: audit arch */
 	call audit_syscall_entry
 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
-	cmpl $(IA32_NR_syscalls-1),%eax
+	cmpq $(IA32_NR_syscalls-1),%rax
 	ja ia32_badsys
 	movl %ebx,%edi			/* reload 1st syscall arg */
 	movl RCX-ARGOFFSET(%rsp),%esi	/* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
 	call	syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
-	cmpl	$(IA32_NR_syscalls-1),%eax
+	cmpq	$(IA32_NR_syscalls-1),%rax
 	ja	int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
 	jmp	sysenter_do_call
 	CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
 	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
-	cmpl $IA32_NR_syscalls-1,%eax
+	cmpq $IA32_NR_syscalls-1,%rax
 	ja  ia32_badsys
 cstar_do_call:
 	IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
 	LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	xchgl %ebp,%r9d
-	cmpl $(IA32_NR_syscalls-1),%eax
+	cmpq $(IA32_NR_syscalls-1),%rax
 	ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
 	jmp cstar_do_call
 END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
 	orl   $TS_COMPAT,TI_status(%r10)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
 	jnz ia32_tracesys
-	cmpl $(IA32_NR_syscalls-1),%eax
+	cmpq $(IA32_NR_syscalls-1),%rax
 	ja ia32_badsys
 ia32_do_call:
 	IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
 	call syscall_trace_enter
 	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
-	cmpl $(IA32_NR_syscalls-1),%eax
+	cmpq $(IA32_NR_syscalls-1),%rax
 	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
 	jmp ia32_do_call
 END(ia32_syscall)

+ 1 - 1
arch/x86/include/asm/compat.h

@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *)regs->sp - len;
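
The per-architecture renames above leave the old compat_alloc_user_space() name free for a common wrapper that can validate the allocation before callers use it. A minimal sketch of such a wrapper, assuming an access_ok()-style check (the generic helper itself is not shown in these hunks, so treat the exact limits and checks as assumptions):

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Sketch only: the size limit and checks below are assumptions. */
void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* Reject lengths that cannot live in the 32-bit compat address space. */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	/* Make sure the stack-based area is actually writable user memory. */
	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}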

+ 2 - 2
arch/x86/include/asm/cpufeature.h

@@ -296,6 +296,7 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+#if __GNUC__ >= 4
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
@@ -304,7 +305,7 @@ extern const char * const x86_power_flags[32];
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
 		asm goto("1: jmp %l[t_no]\n"
 			 "2:\n"
 			 ".section .altinstructions,\"a\"\n"
@@ -345,7 +346,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 #endif
 }
 
-#if __GNUC__ >= 4
 #define static_cpu_has(bit)					\
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\

+ 0 - 1
arch/x86/include/asm/hpet.h

@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);

+ 3 - 3
arch/x86/kernel/apic/x2apic_uv_x.c

@@ -698,9 +698,11 @@ void __init uv_system_init(void)
 		for (j = 0; j < 64; j++) {
 			if (!test_bit(j, &present))
 				continue;
-			uv_blade_info[blade].pnode = (i * 64 + j);
+			pnode = (i * 64 + j);
+			uv_blade_info[blade].pnode = pnode;
 			uv_blade_info[blade].nr_possible_cpus = 0;
 			uv_blade_info[blade].nr_online_cpus = 0;
+			max_pnode = max(pnode, max_pnode);
 			blade++;
 		}
 	}
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
-		max_pnode = max(pnode, max_pnode);
 	}
 
 	/* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
 		pnode = (paddr >> m_val) & pnode_mask;
 		blade = boot_pnode_to_blade(pnode);
 		uv_node_to_blade[nid] = blade;
-		max_pnode = max(pnode, max_pnode);
 	}
 
 	map_gru_high(max_pnode);

+ 0 - 18
arch/x86/kernel/early-quirks.c

@@ -18,7 +18,6 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
-#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
-	hpet_readback_cmp = 1;
-#endif
-}
-
 #define QFLAG_APPLY_ONCE 	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID,
-	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
 	{}
 };
 

+ 17 - 14
arch/x86/kernel/hpet.c

@@ -35,7 +35,6 @@
 unsigned long				hpet_address;
 u8					hpet_blockid; /* OS timer block num */
 u8					hpet_msi_disable;
-u8					hpet_readback_cmp;
 
 #ifdef CONFIG_PCI_MSI
 static unsigned long			hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
 	 * at that point and we would wait for the next hpet interrupt
 	 * forever. We found out that reading the CMP register back
 	 * forces the transfer so we can rely on the comparison with
-	 * the counter register below.
+	 * the counter register below. If the read back from the
+	 * compare register does not match the value we programmed
+	 * then we might have a real hardware problem. We can not do
+	 * much about it here, but at least alert the user/admin with
+	 * a prominent warning.
 	 *
-	 * That works fine on those ATI chipsets, but on newer Intel
-	 * chipsets (ICH9...) this triggers due to an erratum: Reading
-	 * the comparator immediately following a write is returning
-	 * the old value.
+	 * An erratum on some chipsets (ICH9, ...) causes a comparator
+	 * read immediately following a write to return the old value.
+	 * The workaround is to read the comparator a second time when
+	 * the first read returns the old value.
 	 *
-	 * We restrict the read back to the affected ATI chipsets (set
-	 * by quirks) and also run it with hpet=verbose for debugging
-	 * purposes.
+	 * In fact the write to the comparator register is delayed by up
+	 * to two HPET cycles, so the workaround of restricting the
+	 * readback to the ATI chipsets known to be borked failed
+	 * miserably. So we give up on optimizations forever and
+	 * penalize all HPET incarnations unconditionally.
 	 */
-	if (hpet_readback_cmp || hpet_verbose) {
-		u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
-		if (cmp != cnt)
+	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+		if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
 			printk_once(KERN_WARNING
-			    "hpet: compare register read back failed.\n");
+				"hpet: compare register read back failed.\n");
 	}
 
 	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;

+ 1 - 1
drivers/Makefile

@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI)		+= spi/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_FUSION)		+= message/
-obj-$(CONFIG_FIREWIRE)		+= firewire/
+obj-y				+= firewire/
 obj-y				+= ieee1394/
 obj-$(CONFIG_UIO)		+= uio/
 obj-y				+= cdrom/

+ 79 - 6
drivers/dca/dca-core.c

@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
 	kfree(domain);
 }
 
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+	struct dca_provider *dca, *_dca;
+	struct list_head unregistered_providers;
+	struct dca_domain *domain;
+	unsigned long flags;
+
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_REMOVE, NULL);
+
+	INIT_LIST_HEAD(&unregistered_providers);
+
+	spin_lock_irqsave(&dca_lock, flags);
+
+	if (list_empty(&dca_domains)) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	/* at this point only one domain in the list is expected */
+	domain = list_first_entry(&dca_domains, struct dca_domain, node);
+	if (!domain)
+		return;
+
+	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+		list_del(&dca->node);
+		list_add(&dca->node, &unregistered_providers);
+	}
+
+	dca_free_domain(domain);
+
+	spin_unlock_irqrestore(&dca_lock, flags);
+
+	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+		dca_sysfs_remove_provider(dca);
+		list_del(&dca->node);
+	}
+}
+
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 {
 	struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		domain = dca_allocate_domain(rc);
-		if (domain)
-			list_add(&domain->node, &dca_domains);
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+			dca_providers_blocked = 1;
+		} else {
+			domain = dca_allocate_domain(rc);
+			if (domain)
+				list_add(&domain->node, &dca_domains);
+		}
 	}
 
 	return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
 }
 EXPORT_SYMBOL_GPL(free_dca_provider);
 
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
 /**
  * register_dca_provider - register a dca provider
  * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	unsigned long flags;
 	struct dca_domain *domain;
 
+	spin_lock_irqsave(&dca_lock, flags);
+	if (dca_providers_blocked) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	spin_unlock_irqrestore(&dca_lock, flags);
+
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		if (dca_providers_blocked) {
+			spin_unlock_irqrestore(&dca_lock, flags);
+			dca_sysfs_remove_provider(dca);
+			unregister_dca_providers();
+		} else {
+			spin_unlock_irqrestore(&dca_lock, flags);
+		}
 		return -ENODEV;
 	}
 	list_add(&dca->node, &domain->dca_providers);

+ 1 - 0
drivers/firewire/ohci.c

@@ -263,6 +263,7 @@ static const struct {
 	{PCI_VENDOR_ID_JMICRON,	PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
 	{PCI_VENDOR_ID_NEC,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
 	{PCI_VENDOR_ID_VIA,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
+	{PCI_VENDOR_ID_RICOH,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
 	{PCI_VENDOR_ID_APPLE,	PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
 };
 

+ 5 - 5
drivers/gpu/drm/drm_crtc_helper.c

@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		if (connector->funcs->force)
 			connector->funcs->force(connector);
 	} else {
-		connector->status = connector->funcs->detect(connector);
-		drm_helper_hpd_irq_event(dev);
+		connector->status = connector->funcs->detect(connector, true);
+		drm_kms_helper_poll_enable(dev);
 	}
 
 	if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 
 	if (mode_changed) {
-		old_fb = set->crtc->fb;
-		set->crtc->fb = set->fb;
 		set->crtc->enabled = (set->mode != NULL);
 		if (set->mode != NULL) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
+			old_fb = set->crtc->fb;
+			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
 						      old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
 		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
 			continue;
 
-		status = connector->funcs->detect(connector);
+		status = connector->funcs->detect(connector, false);
 		if (old_status != status)
 			changed = true;
 	}

+ 4 - 0
drivers/gpu/drm/drm_pci.c

@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 	dev->hose = pdev->sysdata;
 #endif
 
+	mutex_lock(&drm_global_mutex);
+
 	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
 		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 		goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);
 
+	mutex_unlock(&drm_global_mutex);
 	return 0;
 
 err_g4:
@@ -210,6 +213,7 @@ err_g2:
 	pci_disable_device(pdev);
 err_g1:
 	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);

+ 5 - 0
drivers/gpu/drm/drm_platform.c

@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 	dev->platformdev = platdev;
 	dev->dev = &platdev->dev;
 
+	mutex_lock(&drm_global_mutex);
+
 	ret = drm_fill_in_dev(dev, NULL, driver);
 
 	if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 
 	list_add_tail(&dev->driver_item, &driver->device_list);
 
+	mutex_unlock(&drm_global_mutex);
+
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
 		drm_put_minor(&dev->control);
 err_g1:
 	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_platform_dev);

+ 1 - 1
drivers/gpu/drm/drm_sysfs.c

@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
 	struct drm_connector *connector = to_drm_connector(device);
 	enum drm_connector_status status;
 
-	status = connector->funcs->detect(connector);
+	status = connector->funcs->detect(connector, true);
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			drm_get_connector_status_name(status));
 }

+ 5 - 1
drivers/gpu/drm/i915/intel_crt.c

@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 	return status;
 }
 
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
 	if (intel_crt_detect_ddc(encoder))
 		return connector_status_connected;
 
+	if (!force)
+		return connector->status;
+
 	/* for pre-945g platforms use load detect */
 	if (encoder->crtc && encoder->crtc->enabled) {
 		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
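
The new bool argument lets a driver skip disruptive probing (such as load detection) when the poll path calls detect without user forcing, as the hunk above does for CRT. A hedged sketch of the general callback pattern (the example_* helpers are illustrative, not from this patch):

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	/* Cheap checks first: hotplug pin or DDC/EDID probe. */
	if (example_ddc_probe(connector))
		return connector_status_connected;

	/* Without force, return the last known status rather than
	 * disturbing a live pipe with load detection. */
	if (!force)
		return connector->status;

	return example_load_detect(connector);
}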

+ 1 - 1
drivers/gpu/drm/i915/intel_dp.c

@@ -1386,7 +1386,7 @@ ironlake_dp_detect(struct drm_connector *connector)
  * \return false if DP port is disconnected.
  */
 static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

+ 2 - 1
drivers/gpu/drm/i915/intel_dvo.c

@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
  *
  * Unimplemented.
  */
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);

+ 1 - 1
drivers/gpu/drm/i915/intel_hdmi.c

@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 }
 
 static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);

+ 5 - 2
drivers/gpu/drm/i915/intel_lvds.c

@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
  * connected and closed means disconnected.  We also send hotplug events as
  * needed, using lid status notification from the input layer.
  */
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 * the LID notification event.
 	 */
 	if (connector)
-		connector->status = connector->funcs->detect(connector);
+		connector->status = connector->funcs->detect(connector,
+							     false);
+
 	/* Don't force modeset on machines where it causes a GPU lockup */
 	if (dmi_check_system(intel_no_modeset_on_lid))
 		return NOTIFY_OK;

+ 3 - 2
drivers/gpu/drm/i915/intel_sdvo.c

@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
 	if (!analog_connector)
 		return false;
 
-	if (analog_connector->funcs->detect(analog_connector) ==
+	if (analog_connector->funcs->detect(analog_connector, false) ==
 			connector_status_disconnected)
 		return false;
 
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 	return status;
 }
 
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
 	uint16_t response;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);

+ 5 - 6
drivers/gpu/drm/i915/intel_tv.c

@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
  * we have a pipe programmed in order to probe the TV.
  */
 static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_display_mode mode;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
 
 	if (encoder->crtc && encoder->crtc->enabled) {
 		type = intel_tv_detect_type(intel_tv);
-	} else {
+	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
 
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
-			type = -1;
-	}
-
-	intel_tv->type = type;
+			return connector_status_unknown;
+	} else
+		return connector->status;
 
 	if (type < 0)
 		return connector_status_disconnected;

+ 3 - 3
drivers/gpu/drm/nouveau/nouveau_connector.c

@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 }
 
 static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
 }
 
 static enum drm_connector_status
-nouveau_connector_detect_lvds(struct drm_connector *connector)
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
 
 	/* Try retrieving EDID via DDC */
 	if (!dev_priv->vbios.fp_no_ddc) {
-		status = nouveau_connector_detect(connector);
+		status = nouveau_connector_detect(connector, force);
 		if (status == connector_status_connected)
 			goto out;
 	}

+ 3 - 2
drivers/gpu/drm/radeon/atombios_crtc.c

@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 					pll->algo = PLL_ALGO_LEGACY;
 					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
 				}
-				/* There is some evidence (often anecdotal) that RV515 LVDS
+				/* There is some evidence (often anecdotal) that RV515/RV620 LVDS
 				 * (on some boards at least) prefers the legacy algo.  I'm not
 				 * sure whether this should be handled generically or on a
 				 * case-by-case quirk basis.  Both algos should work fine in the
 				 * majority of cases.
 				 */
 				if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
-				    (rdev->family == CHIP_RV515)) {
+				    ((rdev->family == CHIP_RV515) ||
+				     (rdev->family == CHIP_RV620))) {
 					/* allow the user to override just in case */
 					if (radeon_new_pll == 1)
 						pll->algo = PLL_ALGO_NEW;

+ 19 - 8
drivers/gpu/drm/radeon/evergreen.c

@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 									EVERGREEN_MAX_BACKENDS_MASK));
 			break;
 		}
-	} else
-		gb_backend_map =
-			evergreen_get_tile_pipe_to_backend_map(rdev,
-							       rdev->config.evergreen.max_tile_pipes,
-							       rdev->config.evergreen.max_backends,
-							       ((EVERGREEN_MAX_BACKENDS_MASK <<
-								 rdev->config.evergreen.max_backends) &
-								EVERGREEN_MAX_BACKENDS_MASK));
+	} else {
+		switch (rdev->family) {
+		case CHIP_CYPRESS:
+		case CHIP_HEMLOCK:
+			gb_backend_map = 0x66442200;
+			break;
+		case CHIP_JUNIPER:
+			gb_backend_map = 0x00006420;
+			break;
+		default:
+			gb_backend_map =
+				evergreen_get_tile_pipe_to_backend_map(rdev,
+								       rdev->config.evergreen.max_tile_pipes,
+								       rdev->config.evergreen.max_backends,
+								       ((EVERGREEN_MAX_BACKENDS_MASK <<
+									 rdev->config.evergreen.max_backends) &
+									EVERGREEN_MAX_BACKENDS_MASK));
+		}
+	}
 
 	rdev->config.evergreen.tile_config = gb_addr_config;
 	WREG32(GB_BACKEND_MAP, gb_backend_map);

+ 7 - 17
drivers/gpu/drm/radeon/r100.c

@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
 		return false;
 	}
 	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
-	if (elapsed >= 3000) {
-		/* very likely the improbable case where current
-		 * rptr is equal to last recorded, a while ago, rptr
-		 * this is more likely a false positive update tracking
-		 * information which should force us to be recall at
-		 * latter point
-		 */
-		lockup->last_cp_rptr = cp->rptr;
-		lockup->last_jiffies = jiffies;
-		return false;
-	}
-	if (elapsed >= 1000) {
+	if (elapsed >= 10000) {
 		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
 		return true;
 	}
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 	unsigned long size;
 	unsigned prim_walk;
 	unsigned nverts;
+	unsigned num_cb = track->num_cb;
 
-	for (i = 0; i < track->num_cb; i++) {
+	if (!track->zb_cb_clear && !track->color_channel_mask &&
+	    !track->blend_read_enable)
+		num_cb = 0;
+
+	for (i = 0; i < num_cb; i++) {
 		if (track->cb[i].robj == NULL) {
-			if (!(track->zb_cb_clear || track->color_channel_mask ||
-			      track->blend_read_enable)) {
-				continue;
-			}
 			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
 			return -EINVAL;
 		}

+ 25 - 0
drivers/gpu/drm/radeon/r600_blit_kms.c

@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_drm.h"

+ 24 - 0
drivers/gpu/drm/radeon/r600_blit_shaders.h

@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
 
 #ifndef R600_BLIT_SHADERS_H
 #define R600_BLIT_SHADERS_H

+ 2 - 3
drivers/gpu/drm/radeon/r600_cs.c

@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
 	/* using get ib will give us the offset into the mipmap bo */
 	word0 = radeon_get_ib_value(p, idx + 3) << 8;
 	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
-		dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
-			w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
-		return -EINVAL;
+		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+		  w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
 	}
 	return 0;
 }

+ 47 - 0
drivers/gpu/drm/radeon/radeon_combios.c

@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 			/* PowerMac8,1 ? */
 			/* imac g5 isight */
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+		} else if ((rdev->pdev->device == 0x4a48) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4a48)) {
+			/* Mac X800 */
+			rdev->mode_info.connector_table = CT_MAC_X800;
 		} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					    CONNECTOR_OBJECT_ID_VGA,
 					    &hpd);
 		break;
+	case CT_MAC_X800:
+		DRM_INFO("Connector Table: %d (mac x800)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);

+ 10 - 5
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_encoder *encoder;
 	struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
  * we have to check if this analog encoder is shared with anyone else (TV)
  * if its shared we have to set the other connector to disconnected.
  */
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;

+ 5 - 4
drivers/gpu/drm/radeon/radeon_display.c

@@ -1140,17 +1140,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
 			else
 				radeon_crtc->rmx_type = RMX_OFF;
-			src_v = crtc->mode.vdisplay;
-			dst_v = radeon_crtc->native_mode.vdisplay;
-			src_h = crtc->mode.hdisplay;
-			dst_h = radeon_crtc->native_mode.vdisplay;
 			/* copy native mode */
 			memcpy(&radeon_crtc->native_mode,
 			       &radeon_encoder->native_mode,
 				sizeof(struct drm_display_mode));
+			src_v = crtc->mode.vdisplay;
+			dst_v = radeon_crtc->native_mode.vdisplay;
+			src_h = crtc->mode.hdisplay;
+			dst_h = radeon_crtc->native_mode.hdisplay;
 
 			/* fix up for overscan on hdmi */
 			if (ASIC_IS_AVIVO(rdev) &&
+			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
 			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
 			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
 			      drm_detect_hdmi_monitor(radeon_connector->edid) &&

+ 2 - 1
drivers/gpu/drm/radeon/radeon_mode.h

@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
 
 /* mostly for macs, but really any system without connector tables */
 enum radeon_connector_table {
-	CT_NONE,
+	CT_NONE = 0,
 	CT_GENERIC,
 	CT_IBOOK,
 	CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
 	CT_IMAC_G5_ISIGHT,
 	CT_EMAC,
 	CT_RN50_POWER,
+	CT_MAC_X800,
 };
 
 enum radeon_dvo_chip {

+ 4 - 3
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c

@@ -335,7 +335,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
 }
 
 static enum drm_connector_status
-	vmw_ldu_connector_detect(struct drm_connector *connector)
+	vmw_ldu_connector_detect(struct drm_connector *connector,
+				 bool force)
 {
 	if (vmw_connector_to_ldu(connector)->pref_active)
 		return connector_status_connected;
@@ -516,7 +517,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
-	connector->status = vmw_ldu_connector_detect(connector);
+	connector->status = vmw_ldu_connector_detect(connector, true);
 
 	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
 			 DRM_MODE_ENCODER_LVDS);
@@ -610,7 +611,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
 			ldu->pref_height = 600;
 			ldu->pref_active = false;
 		}
-		con->status = vmw_ldu_connector_detect(con);
+		con->status = vmw_ldu_connector_detect(con, true);
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);

+ 3 - 1
drivers/hid/hid-core.c

@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },

+ 4 - 0
drivers/hid/hid-ids.h

@@ -105,6 +105,7 @@
 
 #define USB_VENDOR_ID_ASUS		0x0486
 #define USB_DEVICE_ID_ASUS_T91MT	0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO	0x0186
 
 #define USB_VENDOR_ID_ASUSTEK		0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
@@ -128,6 +129,7 @@
 
 #define USB_VENDOR_ID_BTC		0x046e
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE	0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2	0x5577
 
 #define USB_VENDOR_ID_CANDO		0x2087
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
@@ -149,6 +151,7 @@
 
 #define USB_VENDOR_ID_CHICONY		0x04f2
 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD	0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH	0xb19d
 
 #define USB_VENDOR_ID_CIDC		0x1677
 
@@ -507,6 +510,7 @@
 #define USB_VENDOR_ID_UCLOGIC		0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209	0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U	0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5	0x6001
 
 #define USB_VENDOR_ID_VERNIER		0x08f7
 #define USB_DEVICE_ID_VERNIER_LABPRO	0x0001

+ 1 - 0
drivers/hid/hid-mosart.c

@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
 
 static const struct hid_device_id mosart_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);

+ 1 - 0
drivers/hid/hid-topseed.c

@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 static const struct hid_device_id ts_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ }
 };

+ 7 - 1
drivers/hid/usbhid/hid-core.c

@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
 		}
 	} else {
 		int skipped_report_id = 0;
+		int report_id = buf[0];
 		if (buf[0] == 0x0) {
 			/* Don't send the Report ID */
 			buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
 		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			HID_REQ_SET_REPORT,
 			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-			((report_type + 1) << 8) | *buf,
+			((report_type + 1) << 8) | report_id,
 			interface->desc.bInterfaceNumber, buf, count,
 			USB_CTRL_SET_TIMEOUT);
 		/* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
 	{ }
 };
 
+struct usb_interface *usbhid_find_interface(int minor)
+{
+	return usb_find_interface(&hid_driver, minor);
+}
+
 static struct hid_driver hid_usb_driver = {
 	.name = "generic-usb",
 	.id_table = hid_usb_table,

+ 4 - 0
drivers/hid/usbhid/hid-quirks.c

@@ -33,6 +33,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
 
 	{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
 
+	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
 	{ 0, 0 }
 };
 

+ 1 - 1
drivers/hid/usbhid/hiddev.c

@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	struct hiddev *hiddev;
 	int res;
 
-	intf = usb_find_interface(&hiddev_driver, iminor(inode));
+	intf = usbhid_find_interface(iminor(inode));
 	if (!intf)
 		return -ENODEV;
 	hid = usb_get_intfdata(intf);

+ 1 - 0
drivers/hid/usbhid/usbhid.h

@@ -42,6 +42,7 @@ void usbhid_submit_report
 (struct hid_device *hid, struct hid_report *report, unsigned char dir);
 int usbhid_get_power(struct hid_device *hid);
 void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
 
 /* iofl flags */
 #define HID_CTRL_RUNNING	1

+ 24 - 19
drivers/hwmon/adm1031.c

@@ -79,7 +79,7 @@ struct adm1031_data {
 	int chip_type;
 	char valid;		/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
-	unsigned int update_rate;	/* In milliseconds */
+	unsigned int update_interval;	/* In milliseconds */
 	/* The chan_select_table contains the possible configurations for
 	 * auto fan control.
 	 */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
 static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
 
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
 	16000, 8000, 4000, 2000, 1000, 500, 250, 125,
 };
 
-static ssize_t show_update_rate(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+				    struct device_attribute *attr, char *buf)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct adm1031_data *data = i2c_get_clientdata(client);
 
-	return sprintf(buf, "%u\n", data->update_rate);
+	return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_rate(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
 	if (err)
 		return err;
 
-	/* find the nearest update rate from the table */
-	for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
-		if (val >= update_rates[i])
+	/*
+	 * Find the nearest update interval from the table.
+	 * Use it to determine the matching update rate.
+	 */
+	for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+		if (val >= update_intervals[i])
 			break;
 	}
-	/* if not found, we point to the last entry (lowest update rate) */
+	/* if not found, we point to the last entry (lowest update interval) */
 
 	/* set the new update rate while preserving other settings */
 	reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
 	adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
 
 	mutex_lock(&data->update_lock);
-	data->update_rate = update_rates[i];
+	data->update_interval = update_intervals[i];
 	mutex_unlock(&data->update_lock);
 
 	return count;
 }
 
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
-		   set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+		   set_update_interval);
 
 static struct attribute *adm1031_attributes[] = {
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
 
 	&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
 
-	&dev_attr_update_rate.attr,
+	&dev_attr_update_interval.attr,
 	&dev_attr_alarms.attr,
 
 	NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
 	mask = ADM1031_UPDATE_RATE_MASK;
 	read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
 	i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
-	data->update_rate = update_rates[i];
+	/* Save it as update interval */
+	data->update_interval = update_intervals[i];
 }
 
 static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
 
 	mutex_lock(&data->update_lock);
 
-	next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+	next_update = data->last_updated
+	  + msecs_to_jiffies(data->update_interval);
 	if (time_after(jiffies, next_update) || !data->valid) {
 
 		dev_dbg(&client->dev, "Starting adm1031 update\n");

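The adm1031.c hunks above rename the sysfs attribute from update_rate to update_interval but keep the selection loop unchanged: the table is sorted in descending order, and the loop stops at the largest supported interval that does not exceed the requested value, falling through to the smallest entry otherwise. A standalone sketch of that lookup (plain user-space C mirroring the driver's loop, not the driver itself):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same table as the driver: supported intervals in milliseconds, descending. */
static const unsigned int update_intervals[] = {
	16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};

/* Return the largest supported interval <= val; requests below 125 ms fall
 * through to the last (smallest) entry, as the driver comment notes. */
static unsigned int pick_interval(unsigned long val)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++)
		if (val >= update_intervals[i])
			break;
	return update_intervals[i];
}

int main(void)
{
	/* prints "2000 500 125" */
	printf("%u %u %u\n", pick_interval(3000), pick_interval(500), pick_interval(10));
	return 0;
}
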
+ 0 - 1
drivers/hwmon/emc1403.c

@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
 	res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
 	if (res) {
 		dev_warn(&client->dev, "create group failed\n");
-		hwmon_device_unregister(data->hwmon_dev);
 		goto thermal_error1;
 	}
 	data->hwmon_dev = hwmon_device_register(&client->dev);

+ 3 - 3
drivers/hwmon/f75375s.c

@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
 #define F75375_REG_PWM2_DROP_DUTY	0x6C
 
 #define FAN_CTRL_LINEAR(nr)		(4 + nr)
-#define FAN_CTRL_MODE(nr)		(5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr)		(4 + ((nr) * 2))
 
 /*
  * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
 		return -EINVAL;
 
 	fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
-	fanmode = ~(3 << FAN_CTRL_MODE(nr));
+	fanmode &= ~(3 << FAN_CTRL_MODE(nr));
 
 	switch (val) {
 	case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&data->update_lock);
 	conf = f75375_read8(client, F75375_REG_CONFIG1);
-	conf = ~(1 << FAN_CTRL_LINEAR(nr));
+	conf &= ~(1 << FAN_CTRL_LINEAR(nr));
 
 	if (val == 0)
 		conf |= (1 << FAN_CTRL_LINEAR(nr)) ;

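Both f75375s.c hunks replace a plain assignment with a read-modify-write: `reg = ~(mask)` throws away every bit just read from the chip, while `reg &= ~(mask)` clears only the targeted field. A standalone illustration of the difference (register value and shift invented for the example; this is not the driver code):

#include <stdio.h>

int main(void)
{
	unsigned char reg = 0xd6;	/* pretend value read back from the chip */
	int shift = 4;			/* e.g. FAN_CTRL_MODE(0) after the fix */

	unsigned char wrong = ~(3 << shift);		/* clobbers every other bit */
	unsigned char fixed = reg & ~(3 << shift);	/* clears only the 2-bit field */

	/* prints "wrong=0xcf fixed=0xc6": the fixed form preserves unrelated bits */
	printf("wrong=0x%02x fixed=0x%02x\n", wrong, fixed);
	return 0;
}
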
+ 2 - 2
drivers/hwmon/lis3lv02d_i2c.c

@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
 {
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-	if (!lis3->pdata->wakeup_flags)
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
 		lis3lv02d_poweroff(lis3);
 	return 0;
 }
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
 {
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-	if (!lis3->pdata->wakeup_flags)
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
 		lis3lv02d_poweron(lis3);
 	return 0;
 }

+ 2 - 2
drivers/hwmon/lis3lv02d_spi.c

@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
 {
 	struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-	if (!lis3->pdata->wakeup_flags)
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
 		lis3lv02d_poweroff(&lis3_dev);
 
 	return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
 {
 	struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-	if (!lis3->pdata->wakeup_flags)
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
 		lis3lv02d_poweron(lis3);
 
 	return 0;

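The four lis3lv02d suspend/resume hunks add the same guard: platform data is optional, so lis3->pdata can legitimately be NULL and must be tested before wakeup_flags is read. A minimal sketch of the short-circuit pattern (struct and field names simplified for illustration, not the kernel types):

#include <stdio.h>
#include <stddef.h>

struct pdata { unsigned int wakeup_flags; };
struct lis3  { struct pdata *pdata; };

static void suspend(struct lis3 *lis3)
{
	/* With ||, the right-hand side is never evaluated when pdata is NULL,
	 * so a device probed without platform data no longer crashes here. */
	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
		printf("powering off\n");
}

int main(void)
{
	struct lis3 no_pdata = { .pdata = NULL };
	suspend(&no_pdata);	/* safe; the old !lis3->pdata->wakeup_flags would oops */
	return 0;
}
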
+ 11 - 10
drivers/hwmon/lm95241.c

@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
 struct lm95241_data {
 	struct device *hwmon_dev;
 	struct mutex update_lock;
-	unsigned long last_updated, rate; /* in jiffies */
+	unsigned long last_updated, interval; /* in jiffies */
 	char valid; /* zero until following fields are valid */
 	/* registers values */
 	u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
 show_temp(remote1);
 show_temp(remote2);
 
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
 	struct lm95241_data *data = lm95241_update_device(dev);
 
-	snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+	snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
 	return strlen(buf);
 }
 
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm95241_data *data = i2c_get_clientdata(client);
 
-	strict_strtol(buf, 10, &data->rate);
-	data->rate = data->rate * HZ / 1000;
+	strict_strtol(buf, 10, &data->interval);
+	data->interval = data->interval * HZ / 1000;
 
 	return count;
 }
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
 static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
 static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
 static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+		   set_interval);
 
 static struct attribute *lm95241_attributes[] = {
 	&dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
 	&dev_attr_temp3_min.attr,
 	&dev_attr_temp2_max.attr,
 	&dev_attr_temp3_max.attr,
-	&dev_attr_rate.attr,
+	&dev_attr_update_interval.attr,
 	NULL
 };
 
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
 {
 	struct lm95241_data *data = i2c_get_clientdata(client);
 
-	data->rate = HZ;    /* 1 sec default */
+	data->interval = HZ;    /* 1 sec default */
 	data->valid = 0;
 	data->config = CFG_CR0076;
 	data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
 
 	mutex_lock(&data->update_lock);
 
-	if (time_after(jiffies, data->last_updated + data->rate) ||
+	if (time_after(jiffies, data->last_updated + data->interval) ||
 	    !data->valid) {
 		dev_dbg(&client->dev, "Updating lm95241 data.\n");
 		data->local_h =

+ 1 - 0
drivers/hwmon/w83627ehf.c

@@ -127,6 +127,7 @@ superio_enter(int ioreg)
 static inline void
 superio_exit(int ioreg)
 {
+	outb(0xaa, ioreg);
 	outb(0x02, ioreg);
 	outb(0x02, ioreg + 1);
 }

+ 3 - 9
drivers/ide/ide-probe.c

@@ -1444,14 +1444,6 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
 			ide_acpi_port_init_devices(hwif);
 	}
 
-	ide_host_for_each_port(i, hwif, host) {
-		if (hwif == NULL)
-			continue;
-
-		if (hwif->present)
-			hwif_register_devices(hwif);
-	}
-
 	ide_host_for_each_port(i, hwif, host) {
 		if (hwif == NULL)
 			continue;
@@ -1459,8 +1451,10 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
 		ide_sysfs_register_port(hwif);
 		ide_proc_register_port(hwif);
 
-		if (hwif->present)
+		if (hwif->present) {
 			ide_proc_port_register_devices(hwif);
+			hwif_register_devices(hwif);
+		}
 	}
 
 	return j ? 0 : -1;

+ 4 - 2
drivers/md/md.c

@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
 		if (rdev->sb_size & bmask)
 			rdev->sb_size = (rdev->sb_size | bmask) + 1;
-	}
+	} else
+		max_dev = le32_to_cpu(sb->max_dev);
+
 	for (i=0; i<max_dev;i++)
 		sb->dev_roles[i] = cpu_to_le16(0xfffe);
 	
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
 		return;
 	if ( ! (
-		(mddev->flags && !mddev->external) ||
+		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
 		(mddev->external == 0 && mddev->safemode == 1) ||

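The second md.c hunk replaces a logical test, `mddev->flags && !mddev->external`, with a bit mask: the intent is to act when any superblock-change flag other than MD_CHANGE_PENDING is set, not merely when flags happens to be non-zero on a non-external array. A standalone sketch of the distinction (the bit number below is assumed purely for illustration):

#include <stdio.h>

#define MD_CHANGE_PENDING 2	/* bit number assumed for this illustration */

int main(void)
{
	/* Only the "pending" bit is set. */
	unsigned long flags = 1UL << MD_CHANGE_PENDING;

	/* A logical test collapses flags to 0/1: any set bit looks like work to do. */
	printf("logical test: %d\n", flags ? 1 : 0);

	/* The bitwise mask ignores MD_CHANGE_PENDING when it is the only bit set. */
	printf("bitwise test: %lu\n", flags & ~(1UL << MD_CHANGE_PENDING));
	return 0;
}
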
Some files are not shown in this diff because too many files have changed.