Steven Whitehouse, 19 years ago
parent
commit
2b3d6e2f23
63 files changed, 856 additions and 409 deletions
  1. Documentation/memory-barriers.txt (+270 -78)
  2. Makefile (+1 -1)
  3. arch/alpha/Kconfig (+1 -1)
  4. arch/arm/mach-ep93xx/ts72xx.c (+4 -4)
  5. arch/arm/mach-imx/irq.c (+1 -1)
  6. arch/arm/mach-integrator/integrator_cp.c (+1 -4)
  7. arch/arm/mach-pxa/spitz.c (+1 -0)
  8. arch/arm/mach-sa1100/neponset.c (+8 -0)
  9. arch/arm/mach-versatile/core.c (+2 -3)
  10. arch/i386/kernel/acpi/earlyquirk.c (+20 -3)
  11. arch/i386/kernel/setup.c (+7 -4)
  12. arch/powerpc/kernel/prom_init.c (+10 -0)
  13. arch/powerpc/kernel/signal_32.c (+10 -1)
  14. arch/powerpc/kernel/signal_64.c (+2 -0)
  15. arch/powerpc/mm/hash_native_64.c (+2 -2)
  16. arch/powerpc/platforms/cell/setup.c (+5 -6)
  17. arch/powerpc/platforms/pseries/setup.c (+8 -0)
  18. arch/sparc/kernel/smp.c (+11 -0)
  19. arch/sparc64/kernel/pci_sun4v.c (+118 -6)
  20. arch/sparc64/kernel/smp.c (+35 -0)
  21. arch/sparc64/kernel/sparc64_ksyms.c (+0 -1)
  22. arch/sparc64/kernel/traps.c (+7 -4)
  23. arch/x86_64/kernel/io_apic.c (+25 -5)
  24. block/as-iosched.c (+6 -7)
  25. block/cfq-iosched.c (+7 -14)
  26. block/deadline-iosched.c (+6 -7)
  27. block/elevator.c (+34 -21)
  28. block/noop-iosched.c (+3 -4)
  29. drivers/acpi/processor_perflib.c (+4 -1)
  30. drivers/cdrom/cdrom.c (+4 -2)
  31. drivers/char/Makefile (+1 -1)
  32. drivers/char/n_tty.c (+3 -1)
  33. drivers/message/fusion/mptspi.c (+2 -0)
  34. drivers/message/i2o/exec-osm.c (+37 -35)
  35. drivers/message/i2o/iop.c (+1 -3)
  36. drivers/net/e1000/e1000_ethtool.c (+4 -1)
  37. drivers/net/e1000/e1000_main.c (+2 -6)
  38. drivers/net/sky2.c (+37 -16)
  39. drivers/net/tg3.c (+48 -96)
  40. drivers/net/tg3.h (+2 -1)
  41. drivers/net/wireless/bcm43xx/bcm43xx_dma.c (+22 -9)
  42. drivers/pci/pci-driver.c (+8 -5)
  43. drivers/pci/pci.c (+16 -2)
  44. drivers/scsi/sata_mv.c (+3 -0)
  45. drivers/usb/host/ohci-pxa27x.c (+3 -0)
  46. drivers/video/console/fbcon.c (+1 -1)
  47. fs/bio.c (+3 -2)
  48. fs/debugfs/inode.c (+2 -1)
  49. fs/locks.c (+2 -0)
  50. include/asm-arm/arch-pxa/ohci.h (+2 -0)
  51. include/asm-powerpc/cputable.h (+1 -1)
  52. include/asm-s390/futex.h (+8 -7)
  53. include/linux/elevator.h (+1 -1)
  54. include/linux/i2o.h (+4 -1)
  55. include/linux/mempolicy.h (+1 -0)
  56. include/linux/pci-acpi.h (+1 -1)
  57. kernel/exit.c (+0 -8)
  58. kernel/posix-cpu-timers.c (+22 -26)
  59. mm/shmem.c (+2 -0)
  60. mm/vmscan.c (+1 -1)
  61. net/dccp/ackvec.c (+1 -0)
  62. net/ipv4/ip_forward.c (+1 -0)
  63. net/ipv4/tcp_input.c (+1 -3)

+ 270 - 78
Documentation/memory-barriers.txt

@@ -19,6 +19,7 @@ Contents:
      - Control dependencies.
      - SMP barrier pairing.
      - Examples of memory barrier sequences.
 +     - Read memory barriers vs load speculation.

  (*) Explicit kernel barriers.

@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
      we may get either of:

 	STORE *A = X; Y = LOAD *A;
 -	STORE *A = Y;
 +	STORE *A = Y = X;


  =========================
 @@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:

  (4) General memory barriers.

 -     A general memory barrier is a combination of both a read memory barrier
 -     and a write memory barrier.  It is a partial ordering over both loads and
 -     stores.
 +     A general memory barrier gives a guarantee that all the LOAD and STORE
 +     operations specified before the barrier will appear to happen before all
 +     the LOAD and STORE operations specified after the barrier with respect to
 +     the other components of the system.
 +
 +     A general memory barrier is a partial ordering over both loads and stores.

      General memory barriers imply both read and write memory barriers, and so
      can substitute for either.
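
 The difference from the weaker barrier types can be made concrete with the
 classic store-buffering pattern (a sketch only, not part of the patch; X, Y,
 r1 and r2 are hypothetical shared variables, all initially zero, and the
 barriers are the kernel's smp_mb()):

	int X, Y;		/* hypothetical shared variables */
	int r1, r2;		/* results observed by each CPU */

	void cpu1(void)
	{
		X = 1;
		smp_mb();	/* order the store to X before the load of Y */
		r1 = Y;
	}

	void cpu2(void)
	{
		Y = 1;
		smp_mb();	/* order the store to Y before the load of X */
		r2 = X;
	}

 With the general barriers in place, r1 == 0 && r2 == 0 cannot be observed once
 both functions have run; smp_wmb()/smp_rmb() would not be enough here, because
 each CPU needs its own store ordered before its own later load.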
 @@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:

 	===============	===============
 	a = 1;
 	<write barrier>
 -	b = 2;		x = a;
 +	b = 2;		x = b;
 			<read barrier>
 -			y = b;
 +			y = a;

  Or:

@@ -563,6 +567,18 @@ Or:
  Basically, the read barrier always has to be there, even though it can be of
  the "weaker" type.

+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+	CPU 1                           CPU 2
+	===============                 ===============
+	a = 1;           }----   --->{  v = c
+	b = 2;           }    \ /    {  w = d
+	<write barrier>        \        <read barrier>
+	c = 3;           }    / \    {  x = a;
+	d = 4;           }----   --->{  y = b;
+
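
 As a minimal sketch of this pairing rule (not part of the patch; it assumes
 the kernel's smp_wmb(), smp_rmb() and cpu_relax() primitives and two
 hypothetical shared variables, data and flag):

	int data;			/* payload published by the writer */
	int flag;			/* set once data is ready */

	void writer(void)
	{
		data = 42;		/* store that must be seen first */
		smp_wmb();		/* pairs with the reader's smp_rmb() */
		flag = 1;
	}

	int reader(void)
	{
		while (!flag)		/* wait for the writer's flag store */
			cpu_relax();
		smp_rmb();		/* pairs with the writer's smp_wmb() */
		return data;		/* guaranteed to observe 42 */
	}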
 
 
  EXAMPLES OF MEMORY BARRIER SEQUENCES
  ------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
 	|       |       +------+
 	+-------+       :      :
 	                   |
-	                   | Sequence in which stores committed to memory system
-	                   | by CPU 1
+	                   | Sequence in which stores are committed to the
+	                   | memory system by CPU 1
 	                   V


@@ -683,14 +699,12 @@ then the following will occur:
 	                               |        :       :       |       |
 	                               |        :       :       | CPU 2 |
 	                               |        +-------+       |       |
-	                                \       | X->9  |------>|       |
-	                                 \      +-------+       |       |
-	                                  ----->| B->2  |       |       |
-	                                        +-------+       |       |
-	     Makes sure all effects --->    ddddddddddddddddd   |       |
-	     prior to the store of C            +-------+       |       |
-	     are perceptible to                 | B->2  |------>|       |
-	     successive loads                   +-------+       |       |
+	                               |        | X->9  |------>|       |
+	                               |        +-------+       |       |
+	  Makes sure all effects --->   \   ddddddddddddddddd   |       |
+	  prior to the store of C        \      +-------+       |       |
+	  are perceptible to              ----->| B->2  |------>|       |
+	  subsequent loads                      +-------+       |       |
 	                                        :       :       +-------+


 @@ -699,73 +713,239 @@ following sequence of events:

 	CPU 1			CPU 2
 	=======================	=======================
+		{ A = 0, B = 9 }
 	STORE A=1
-	STORE B=2
-	STORE C=3
 	<write barrier>
-	STORE D=4
-	STORE E=5
-				LOAD A
+	STORE B=2
 				LOAD B
-				LOAD C
-				LOAD D
-				LOAD E
+				LOAD A
 
 
  Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
  some effectively random order, despite the write barrier issued by CPU 1:

-	+-------+       :      :
-	|       |       +------+
-	|       |------>| C=3  | }
-	|       |  :    +------+ }
-	|       |  :    | A=1  | }
-	|       |  :    +------+ }
-	| CPU 1 |  :    | B=2  | }---
-	|       |       +------+ }   \
-	|       |   wwwwwwwwwwwww}    \
-	|       |       +------+ }     \          :       :       +-------+
-	|       |  :    | E=5  | }      \         +-------+       |       |
-	|       |  :    +------+ }       \      { | C->3  |------>|       |
-	|       |------>| D=4  | }        \     { +-------+    :  |       |
-	|       |       +------+           \    { | E->5  |    :  |       |
-	+-------+       :      :            \   { +-------+    :  |       |
-	                           Transfer  -->{ | A->1  |    :  | CPU 2 |
-	                          from CPU 1    { +-------+    :  |       |
-	                           to CPU 2     { | D->4  |    :  |       |
-	                                        { +-------+    :  |       |
-	                                        { | B->2  |------>|       |
-	                                          +-------+       |       |
-	                                          :       :       +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :       :
+	|       |       +------+   \    |       :       :       +-------+
+	+-------+       :      :    \   |       +-------+       |       |
+	                             ---------->| B->2  |------>|       |
+	                                |       +-------+       | CPU 2 |
+	                                |       | A->0  |------>|       |
+	                                |       +-------+       |       |
+	                                |       :       :       +-------+
+	                                 \      :       :
+	                                  \     +-------+
+	                                   ---->| A->1  |
+	                                        +-------+
+	                                        :       :
 
 
-	+-------+       :      :
-	|       |       +------+
-	|       |------>| C=3  | }
-	|       |  :    +------+ }
-	|       |  :    | A=1  | }---
-	|       |  :    +------+ }   \
-	| CPU 1 |  :    | B=2  | }    \
-	|       |       +------+       \
-	|       |   wwwwwwwwwwwwwwww    \
-	|       |       +------+         \        :       :       +-------+
-	|       |  :    | E=5  | }        \       +-------+       |       |
-	|       |  :    +------+ }---      \    { | C->3  |------>|       |
-	|       |------>| D=4  | }   \      \   { +-------+    :  |       |
-	|       |       +------+      \      -->{ | B->2  |    :  |       |
-	+-------+       :      :       \        { +-------+    :  |       |
-	                                \       { | A->1  |    :  | CPU 2 |
-	                                 \        +-------+       |       |
-	   At this point the read ---->   \   rrrrrrrrrrrrrrrrr   |       |
-	   barrier causes all effects      \      +-------+       |       |
-	   prior to the storage of C        \   { | E->5  |    :  |       |
-	   to be perceptible to CPU 2        -->{ +-------+    :  |       |
-	                                        { | D->4  |------>|       |
-	                                          +-------+       |       |
-	                                          :       :       +-------+
+
+If, however, a read barrier were to be placed between the load of E and the
+load of A on CPU 2:
+
+	CPU 1			CPU 2
+	=======================	=======================
+		{ A = 0, B = 9 }
+	STORE A=1
+	<write barrier>
+	STORE B=2
+				LOAD B
+				<read barrier>
+				LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :       :
+	|       |       +------+   \    |       :       :       +-------+
+	+-------+       :      :    \   |       +-------+       |       |
+	                             ---------->| B->2  |------>|       |
+	                                |       +-------+       | CPU 2 |
+	                                |       :       :       |       |
+	                                |       :       :       |       |
+	  At this point the read ---->   \  rrrrrrrrrrrrrrrrr   |       |
+	  barrier causes all effects      \     +-------+       |       |
+	  prior to the storage of B        ---->| A->1  |------>|       |
+	  to be perceptible to CPU 2            +-------+       |       |
+	                                        :       :       +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+	CPU 1			CPU 2
+	=======================	=======================
+		{ A = 0, B = 9 }
+	STORE A=1
+	<write barrier>
+	STORE B=2
+				LOAD B
+				LOAD A [first load of A]
+				<read barrier>
+				LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :       :
+	|       |       +------+   \    |       :       :       +-------+
+	+-------+       :      :    \   |       +-------+       |       |
+	                             ---------->| B->2  |------>|       |
+	                                |       +-------+       | CPU 2 |
+	                                |       :       :       |       |
+	                                |       :       :       |       |
+	                                |       +-------+       |       |
+	                                |       | A->0  |------>| 1st   |
+	                                |       +-------+       |       |
+	  At this point the read ---->   \  rrrrrrrrrrrrrrrrr   |       |
+	  barrier causes all effects      \     +-------+       |       |
+	  prior to the storage of B        ---->| A->1  |------>| 2nd   |
+	  to be perceptible to CPU 2            +-------+       |       |
+	                                        :       :       +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| A=1  |------      --->| A->0  |
+	|       |       +------+      \         +-------+
+	| CPU 1 |   wwwwwwwwwwwwwwww   \    --->| B->9  |
+	|       |       +------+        |       +-------+
+	|       |------>| B=2  |---     |       :       :
+	|       |       +------+   \    |       :       :       +-------+
+	+-------+       :      :    \   |       +-------+       |       |
+	                             ---------->| B->2  |------>|       |
+	                                |       +-------+       | CPU 2 |
+	                                |       :       :       |       |
+	                                 \      :       :       |       |
+	                                  \     +-------+       |       |
+	                                   ---->| A->1  |------>| 1st   |
+	                                        +-------+       |       |
+	                                    rrrrrrrrrrrrrrrrr   |       |
+	                                        +-------+       |       |
+	                                        | A->1  |------>| 2nd   |
+	                                        +-------+       |       |
+	                                        :       :       +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2.  No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is they see that they will need to load an
+item from memory, and they find a time where they're not using the bus for any
+other loads, and so do the load in advance - even though they haven't actually
+got to that point in the instruction execution flow yet.  This permits the
+actual load instruction to potentially complete immediately because the CPU
+already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+	CPU 1	   		CPU 2
+	=======================	=======================
+	 	   		LOAD B
+	 	   		DIVIDE		} Divide instructions generally
+	 	   		DIVIDE		} take a long time to perform
+	 	   		LOAD A
+
+Which might appear as this:
+
+	                                        :       :       +-------+
+	                                        +-------+       |       |
+	                                    --->| B->2  |------>|       |
+	                                        +-------+       | CPU 2 |
+	                                        :       :DIVIDE |       |
+	                                        +-------+       |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+	division speculates on the              +-------+   ~   |       |
+	LOAD of A                               :       :   ~   |       |
+	                                        :       :DIVIDE |       |
+	                                        :       :   ~   |       |
+	Once the divisions are complete -->     :       :   ~-->|       |
+	the CPU can then perform the            :       :       |       |
+	LOAD with immediate effect              :       :       +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+	CPU 1	   		CPU 2
+	=======================	=======================
+	 	   		LOAD B
+	 	   		DIVIDE
+	 	   		DIVIDE
+				<read barrier>
+	 	   		LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used.  If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+	                                        :       :       +-------+
+	                                        +-------+       |       |
+	                                    --->| B->2  |------>|       |
+	                                        +-------+       | CPU 2 |
+	                                        :       :DIVIDE |       |
+	                                        +-------+       |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+	division speculates on the              +-------+   ~   |       |
+	LOAD of A                               :       :   ~   |       |
+	                                        :       :DIVIDE |       |
+	                                        :       :   ~   |       |
+	                                        :       :   ~   |       |
+	                                    rrrrrrrrrrrrrrrr~   |       |
+	                                        :       :   ~   |       |
+	                                        :       :   ~-->|       |
+	                                        :       :       |       |
+	                                        :       :       +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+	                                        :       :       +-------+
+	                                        +-------+       |       |
+	                                    --->| B->2  |------>|       |
+	                                        +-------+       | CPU 2 |
+	                                        :       :DIVIDE |       |
+	                                        +-------+       |       |
+	The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+	division speculates on the              +-------+   ~   |       |
+	LOAD of A                               :       :   ~   |       |
+	                                        :       :DIVIDE |       |
+	                                        :       :   ~   |       |
+	                                        :       :   ~   |       |
+	                                    rrrrrrrrrrrrrrrrr   |       |
+	                                        +-------+       |       |
+	The speculation is discarded --->   --->| A->1  |------>|       |
+	and an updated value is                 +-------+       |       |
+	retrieved                               :       :       +-------+
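
 Expressed as C rather than the pseudo-ops above (a sketch only; A and B are
 hypothetical shared variables written by CPU 1, and the divisions merely
 stand in for long-latency work between the two loads):

	int A, B;			/* hypothetical shared variables */

	int cpu2(void)
	{
		int b, x, a;

		b = B;			/* LOAD B */
		x = b / 3;		/* long-latency work: the CPU may	*/
		x = x / 7;		/*   speculatively issue the load of A	*/
		smp_rmb();		/* speculated value is revalidated or	*/
		a = A;			/*   discarded before this LOAD of A	*/
		return a + x;
	}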
 
 
 
 
  ========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
  ===============================

  Some of the other functions in the linux kernel imply memory barriers, amongst
 -which are locking, scheduling and memory allocation functions.
 +which are locking and scheduling functions.

  This specification is a _minimum_ guarantee; any particular architecture may
  provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
     barriers is that the effects instructions outside of a critical section may
     seep into the inside of the critical section.

+A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+	*A = a;
+	LOCK
+	UNLOCK
+	*B = b;
+
+may occur as:
+
+	LOCK, STORE *B, STORE *A, UNLOCK
+
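
 If the two stores really do need to be ordered with respect to other CPUs,
 an explicit barrier (or moving both accesses inside the critical section) is
 required; a sketch, not part of the patch:

	*A = a;
	LOCK
	UNLOCK
	smp_mb();	/* explicit full barrier: *A now ordered before *B */
	*B = b;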
  Locks and semaphores may not provide any guarantee of ordering on UP compiled
  systems, and so cannot be counted on in such a situation to actually achieve
  anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
 
 
  (*) schedule() and similar imply full memory barriers.

- (*) Memory allocation and release functions imply full memory barriers.
-
 
 
  =================================
  INTER-CPU LOCKING BARRIER EFFECTS

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
  VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 17
-EXTRAVERSION =-rc6
+EXTRAVERSION =
  NAME=Crazed Snow-Weasel

  # *DOCUMENTATION*

+ 1 - 1
arch/alpha/Kconfig

@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
 
 
 config GENERIC_HWEIGHT
 config GENERIC_HWEIGHT
 	bool
 	bool
-	default y if !ALPHA_EV6 && !ALPHA_EV67
+	default y if !ALPHA_EV67
 
 
 config ALPHA_AVANTI
 config ALPHA_AVANTI
 	bool
 	bool

+ 4 - 4
arch/arm/mach-ep93xx/ts72xx.c

@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
 	}
 	}
 }
 }
 
 
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
 {
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
 	return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
 }
 }
 
 
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
 {
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
 	__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
 }
 }
 
 
 static struct m48t86_ops ts72xx_rtc_ops = {
 static struct m48t86_ops ts72xx_rtc_ops = {
-	.readb			= ts72xx_rtc_readb,
-	.writeb			= ts72xx_rtc_writeb,
+	.readbyte		= ts72xx_rtc_readbyte,
+	.writebyte		= ts72xx_rtc_writebyte,
 };
 };
 
 
 static struct platform_device ts72xx_rtc_device = {
 static struct platform_device ts72xx_rtc_device = {

+ 1 - 1
arch/arm/mach-imx/irq.c

@@ -127,7 +127,7 @@ static void
 imx_gpio_ack_irq(unsigned int irq)
 imx_gpio_ack_irq(unsigned int irq)
 {
 {
 	DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
 	DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
-	ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+	ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
 }
 }
 
 
 static void
 static void

+ 1 - 4
arch/arm/mach-integrator/integrator_cp.c

@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
 	for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
 	for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
 		if (i == 11)
 		if (i == 11)
 			i = 22;
 			i = 22;
-		if (i == IRQ_CP_CPPLDINT)
-			i++;
 		if (i == 29)
 		if (i == 29)
 			break;
 			break;
 		set_irq_chip(i, &pic_chip);
 		set_irq_chip(i, &pic_chip);
@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}
 	}
 
 
-	set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
-	pic_unmask_irq(IRQ_CP_CPPLDINT);
+	set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
 }
 }
 
 
 /*
 /*

+ 1 - 0
arch/arm/mach-pxa/spitz.c

@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
 static struct pxaohci_platform_data spitz_ohci_platform_data = {
 static struct pxaohci_platform_data spitz_ohci_platform_data = {
 	.port_mode	= PMM_NPS_MODE,
 	.port_mode	= PMM_NPS_MODE,
 	.init		= spitz_ohci_init,
 	.init		= spitz_ohci_init,
+	.power_budget	= 150,
 };
 };
 
 
 
 

+ 8 - 0
arch/arm/mach-sa1100/neponset.c

@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
 			desc->chip->mask(irq);
 			desc->chip->mask(irq);
 
 
+			/*
+			 * Ack the interrupt now to prevent re-entering
+			 * this neponset handler.  Again, this is safe
+			 * since we'll check the IRR register prior to
+			 * leaving.
+			 */
+			desc->chip->ack(irq);
+
 			if (irr & IRR_ETHERNET) {
 			if (irr & IRR_ETHERNET) {
 				d = irq_desc + IRQ_NEPONSET_SMC9196;
 				d = irq_desc + IRQ_NEPONSET_SMC9196;
 				desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
 				desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);

+ 2 - 3
arch/arm/mach-versatile/core.c

@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
 {
 {
 	unsigned int i;
 	unsigned int i;
 
 
-	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
 
 
-	set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
-	enable_irq(IRQ_VICSOURCE31);
+	set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
 
 
 	/* Do second interrupt controller */
 	/* Do second interrupt controller */
 	writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
 	writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);

+ 20 - 3
arch/i386/kernel/acpi/earlyquirk.c

@@ -5,17 +5,34 @@
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+
 #include <asm/pci-direct.h>
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
 #include <asm/apic.h>
 
 
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 static int __init check_bridge(int vendor, int device)
 static int __init check_bridge(int vendor, int device)
 {
 {
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_ACPI
-	/* According to Nvidia all timer overrides are bogus. Just ignore
-	   them all. */
+	/* According to Nvidia all timer overrides are bogus unless HPET
+	   is enabled. */
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
-		acpi_skip_timer_override = 1;
+		nvidia_hpet_detected = 0;
+		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+		if (nvidia_hpet_detected == 0) {
+			acpi_skip_timer_override = 1;
+		}
 	}
 	}
 #endif
 #endif
 	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
 	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {

+ 7 - 4
arch/i386/kernel/setup.c

@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled)
 	if (efi_enabled)
 		efi_map_memmap();
 		efi_map_memmap();
 
 
-#ifdef CONFIG_X86_IO_APIC
-	check_acpi_pci();	/* Checks more than just ACPI actually */
-#endif
-
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_ACPI
 	/*
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 */
 	 */
 	acpi_boot_table_init();
 	acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+	check_acpi_pci();	/* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
 	acpi_boot_init();
 	acpi_boot_init();
 
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)

+ 10 - 0
arch/powerpc/kernel/prom_init.c

@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
 		/* try calling the ibm,client-architecture-support method */
 		/* try calling the ibm,client-architecture-support method */
 		if (call_prom_ret("call-method", 3, 2, &ret,
 		if (call_prom_ret("call-method", 3, 2, &ret,
 				  ADDR("ibm,client-architecture-support"),
 				  ADDR("ibm,client-architecture-support"),
+				  root,
 				  ADDR(ibm_architecture_vec)) == 0) {
 				  ADDR(ibm_architecture_vec)) == 0) {
 			/* the call exists... */
 			/* the call exists... */
 			if (ret)
 			if (ret)
@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
 			if (strstr(p, RELOC("Power Macintosh")) ||
 			if (strstr(p, RELOC("Power Macintosh")) ||
 			    strstr(p, RELOC("MacRISC")))
 			    strstr(p, RELOC("MacRISC")))
 				return PLATFORM_POWERMAC;
 				return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+			/* We must make sure we don't detect the IBM Cell
+			 * blades as pSeries due to some firmware issues,
+			 * so we do it here.
+			 */
+			if (strstr(p, RELOC("IBM,CBEA")) ||
+			    strstr(p, RELOC("IBM,CPBW-1.0")))
+				return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
 			i += sl + 1;
 			i += sl + 1;
 		}
 		}
 	}
 	}

+ 10 - 1
arch/powerpc/kernel/signal_32.c

@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
 		if (__get_user(cmcp, &ucp->uc_regs))
 		if (__get_user(cmcp, &ucp->uc_regs))
 			return -EFAULT;
 			return -EFAULT;
 		mcp = (struct mcontext __user *)(u64)cmcp;
 		mcp = (struct mcontext __user *)(u64)cmcp;
+		/* no need to check access_ok(mcp), since mcp < 4GB */
 	}
 	}
 #else
 #else
 	if (__get_user(mcp, &ucp->uc_regs))
 	if (__get_user(mcp, &ucp->uc_regs))
 		return -EFAULT;
 		return -EFAULT;
+	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+		return -EFAULT;
 #endif
 #endif
 	restore_sigmask(&set);
 	restore_sigmask(&set);
 	if (restore_user_regs(regs, mcp, sig))
 	if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 {
 {
 	struct sig_dbg_op op;
 	struct sig_dbg_op op;
 	int i;
 	int i;
+	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
 	unsigned long new_msr = regs->msr;
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	unsigned long new_dbcr0 = current->thread.dbcr0;
 	unsigned long new_dbcr0 = current->thread.dbcr0;
 #endif
 #endif
 
 
 	for (i=0; i<ndbg; i++) {
 	for (i=0; i<ndbg; i++) {
-		if (__copy_from_user(&op, dbg, sizeof(op)))
+		if (copy_from_user(&op, dbg + i, sizeof(op)))
 			return -EFAULT;
 			return -EFAULT;
 		switch (op.dbg_type) {
 		switch (op.dbg_type) {
 		case SIG_DBG_SINGLE_STEPPING:
 		case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	current->thread.dbcr0 = new_dbcr0;
 	current->thread.dbcr0 = new_dbcr0;
 #endif
 #endif
 
 
+	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+	    || __get_user(tmp, (u8 __user *) ctx)
+	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+		return -EFAULT;
+
 	/*
 	/*
 	 * If we get a fault copying the context into the kernel's
 	 * If we get a fault copying the context into the kernel's
 	 * image of the user's registers, we can't just return -EFAULT
 	 * image of the user's registers, we can't just return -EFAULT

+ 2 - 0
arch/powerpc/kernel/signal_64.c

@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
 	if (err)
 	if (err)
 		return err;
 		return err;
+	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != 0 && (msr & MSR_VEC) != 0)
 	if (v_regs != 0 && (msr & MSR_VEC) != 0)
 		err |= __copy_from_user(current->thread.vr, v_regs,
 		err |= __copy_from_user(current->thread.vr, v_regs,

+ 2 - 2
arch/powerpc/mm/hash_native_64.c

@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	default:
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 		break;
 	}
 	}
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	default:
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 			     : : "r"(va) : "memory");
 		break;
 		break;

+ 5 - 6
arch/powerpc/platforms/cell/setup.c

@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
 
 
 static int __init cell_probe(void)
 static int __init cell_probe(void)
 {
 {
-	/* XXX This is temporary, the Cell maintainer will come up with
-	 * more appropriate detection logic
-	 */
 	unsigned long root = of_get_flat_dt_root();
 	unsigned long root = of_get_flat_dt_root();
-	if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
-		return 0;
 
 
-	return 1;
+	if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+	    of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+		return 1;
+
+	return 0;
 }
 }
 
 
 /*
 /*

+ 8 - 0
arch/powerpc/platforms/pseries/setup.c

@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
 
 
 static int __init pSeries_probe(void)
 static int __init pSeries_probe(void)
 {
 {
+	unsigned long root = of_get_flat_dt_root();
  	char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
  	char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
  					  "device_type", NULL);
  					  "device_type", NULL);
  	if (dtype == NULL)
  	if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
  	if (strcmp(dtype, "chrp"))
  	if (strcmp(dtype, "chrp"))
 		return 0;
 		return 0;
 
 
+	/* Cell blades firmware claims to be chrp while it's not. Until this
+	 * is fixed, we need to avoid those here.
+	 */
+	if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+	    of_flat_dt_is_compatible(root, "IBM,CBEA"))
+		return 0;
+
 	DBG("pSeries detected, looking for LPAR capability...\n");
 	DBG("pSeries detected, looking for LPAR capability...\n");
 
 
 	/* Now try to figure out if we are running on LPAR */
 	/* Now try to figure out if we are running on LPAR */

+ 11 - 0
arch/sparc/kernel/smp.c

@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
 						     "clock-frequency", 0);
 						     "clock-frequency", 0);
 	cpu_data(id).prom_node = cpu_node;
 	cpu_data(id).prom_node = cpu_node;
 	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
 	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+
+	/* this is required to tune the scheduler correctly */
+	/* is it possible to have CPUs with different cache sizes? */
+	if (id == boot_cpu_id) {
+		int cache_line,cache_nlines;
+		cache_line = 0x20;
+		cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
+		cache_nlines = 0x8000;
+		cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
+		max_cache_size = cache_line * cache_nlines;
+	}
 	if (cpu_data(id).mid < 0)
 	if (cpu_data(id).mid < 0)
 		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
 		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
 }
 }

+ 118 - 6
arch/sparc64/kernel/pci_sun4v.c

@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 
 
 /* SUN4V PCI configuration space accessors. */
 /* SUN4V PCI configuration space accessors. */
 
 
-static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+struct pdev_entry {
+	struct pdev_entry	*next;
+	u32			devhandle;
+	unsigned int		bus;
+	unsigned int		device;
+	unsigned int		func;
+};
+
+#define PDEV_HTAB_SIZE	16
+#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
 {
 {
-	if (bus == pbm->pci_first_busno) {
-		if (device == 0 && func == 0)
-			return 0;
-		return 1;
+	unsigned int val;
+
+	val = (devhandle ^ (devhandle >> 4));
+	val ^= bus;
+	val ^= device;
+	val ^= func;
+
+	return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	struct pdev_entry **slot;
+
+	if (!p)
+		return -ENOMEM;
+
+	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	p->next = *slot;
+	*slot = p;
+
+	p->devhandle = devhandle;
+	p->bus = bus;
+	p->device = device;
+	p->func = func;
+
+	return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+	toplevel_node = prom_getchild(toplevel_node);
+
+	while (toplevel_node != 0) {
+		int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+		if (ret != 0)
+			return ret;
+
+		ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+				       sizeof(*pregs) * PROMREG_MAX);
+		if (ret == 0 || ret == -1)
+			goto next_sibling;
+
+		if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+		    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+			break;
+
+	next_sibling:
+		toplevel_node = prom_getsibling(toplevel_node);
+	}
+
+	return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+	struct linux_prom_pci_registers pr[PROMREG_MAX];
+	u32 devhandle = pbm->devhandle;
+	unsigned int bus;
+
+	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+		unsigned int devfn;
+
+		for (devfn = 0; devfn < 256; devfn++) {
+			unsigned int device = PCI_SLOT(devfn);
+			unsigned int func = PCI_FUNC(devfn);
+
+			if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+				int err = pdev_htab_add(devhandle, bus,
+							device, func);
+				if (err)
+					return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p;
+
+	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	while (p) {
+		if (p->devhandle == devhandle &&
+		    p->bus == bus &&
+		    p->device == device &&
+		    p->func == func)
+			break;
+
+		p = p->next;
 	}
 	}
 
 
+	return p;
+}
+
+static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+{
 	if (bus < pbm->pci_first_busno ||
 	if (bus < pbm->pci_first_busno ||
 	    bus > pbm->pci_last_busno)
 	    bus > pbm->pci_last_busno)
 		return 1;
 		return 1;
-	return 0;
+	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
 }
 }
 
 
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
 
 
 	pci_sun4v_get_bus_range(pbm);
 	pci_sun4v_get_bus_range(pbm);
 	pci_sun4v_iommu_init(pbm);
 	pci_sun4v_iommu_init(pbm);
+
+	pdev_htab_populate(pbm);
 }
 }
 
 
 void sun4v_pci_init(int node, char *model_name)
 void sun4v_pci_init(int node, char *model_name)

+ 35 - 0
arch/sparc64/kernel/smp.c

@@ -1287,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 	return 0;
 }
 }
 
 
+static void __init smp_tune_scheduling(void)
+{
+	int instance, node;
+	unsigned int def, smallest = ~0U;
+
+	def = ((tlb_type == hypervisor) ?
+	       (3 * 1024 * 1024) :
+	       (4 * 1024 * 1024));
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, &node, NULL)) {
+		unsigned int val;
+
+		val = prom_getintdefault(node, "ecache-size", def);
+		if (val < smallest)
+			smallest = val;
+
+		instance++;
+	}
+
+	/* Any value less than 256K is nonsense.  */
+	if (smallest < (256U * 1024U))
+		smallest = 256 * 1024;
+
+	max_cache_size = smallest;
+
+	if (smallest < 1U * 1024U * 1024U)
+		printk(KERN_INFO "Using max_cache_size of %uKB\n",
+		       smallest / 1024U);
+	else
+		printk(KERN_INFO "Using max_cache_size of %uMB\n",
+		       smallest / 1024U / 1024U);
+}
+
 /* Constrain the number of cpus to max_cpus.  */
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 {
@@ -1322,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 	}
 
 
 	smp_store_cpu_info(boot_cpu_id);
 	smp_store_cpu_info(boot_cpu_id);
+	smp_tune_scheduling();
 }
 }
 
 
 /* Set this up early so that things like the scheduler can init
 /* Set this up early so that things like the scheduler can init

+ 0 - 1
arch/sparc64/kernel/sparc64_ksyms.c

@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(compat_sys_ioctl);
 EXPORT_SYMBOL(compat_sys_ioctl);
 EXPORT_SYMBOL(sparc32_open);
 EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
 #endif
 #endif
 
 
 /* Special internal versions of library functions. */
 /* Special internal versions of library functions. */

+ 7 - 4
arch/sparc64/kernel/traps.c

@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
 	};
 	};
 }
 }
 
 
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
 {
 {
 	int cnt;
 	int cnt;
 
 
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
 	       pfx,
 	       pfx,
 	       ent->err_raddr, ent->err_size, ent->err_cpu);
 	       ent->err_raddr, ent->err_size, ent->err_cpu);
 
 
+	__show_regs(regs);
+
 	if ((cnt = atomic_read(ocnt)) != 0) {
 	if ((cnt = atomic_read(ocnt)) != 0) {
 		atomic_set(ocnt, 0);
 		atomic_set(ocnt, 0);
 		wmb();
 		wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 
 
 	put_cpu();
 	put_cpu();
 
 
-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_ERR "RESUMABLE ERROR",
 			KERN_ERR "RESUMABLE ERROR",
 			&sun4v_resum_oflow_cnt);
 			&sun4v_resum_oflow_cnt);
 }
 }
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 	}
 	}
 #endif
 #endif
 
 
-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_EMERG "NON-RESUMABLE ERROR",
 			KERN_EMERG "NON-RESUMABLE ERROR",
 			&sun4v_nonresum_oflow_cnt);
 			&sun4v_nonresum_oflow_cnt);
 
 
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
 {
 	static int die_counter;
 	static int die_counter;
-	extern void __show_regs(struct pt_regs * regs);
 	extern void smp_report_regs(void);
 	extern void smp_report_regs(void);
 	int count = 0;
 	int count = 0;
 	
 	

+ 25 - 5
arch/x86_64/kernel/io_apic.c

@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
 #include <linux/pci_ids.h>
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
 
 
+
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
    off. Check for an Nvidia or VIA PCI bridge and turn it off.
    off. Check for an Nvidia or VIA PCI bridge and turn it off.
    Use pci direct infrastructure because this runs before the PCI subsystem. 
    Use pci direct infrastructure because this runs before the PCI subsystem. 
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
 					return;
 					return;
 				case PCI_VENDOR_ID_NVIDIA:
 				case PCI_VENDOR_ID_NVIDIA:
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_ACPI
-					/* All timer overrides on Nvidia
-				           seem to be wrong. Skip them. */
-					acpi_skip_timer_override = 1;
-					printk(KERN_INFO 
-	     "Nvidia board detected. Ignoring ACPI timer override.\n");
+					/*
+					 * All timer overrides on Nvidia are
+					 * wrong unless HPET is enabled.
+					 */
+					nvidia_hpet_detected = 0;
+					acpi_table_parse(ACPI_HPET,
+							nvidia_hpet_check);
+					if (nvidia_hpet_detected == 0) {
+						acpi_skip_timer_override = 1;
+						printk(KERN_INFO "Nvidia board "
+						    "detected. Ignoring ACPI "
+						    "timer override.\n");
+					}
 #endif
 #endif
 					/* RED-PEN skip them on mptables too? */
 					/* RED-PEN skip them on mptables too? */
 					return;
 					return;

+ 6 - 7
block/as-iosched.c

@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
  * initialize elevator private data (as_data), and alloc a arq for
  * initialize elevator private data (as_data), and alloc a arq for
  * each request on the free lists
  * each request on the free lists
  */
  */
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 {
 	struct as_data *ad;
 	struct as_data *ad;
 	int i;
 	int i;
 
 
 	if (!arq_pool)
 	if (!arq_pool)
-		return -ENOMEM;
+		return NULL;
 
 
 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	if (!ad)
 	if (!ad)
-		return -ENOMEM;
+		return NULL;
 	memset(ad, 0, sizeof(*ad));
 	memset(ad, 0, sizeof(*ad));
 
 
 	ad->q = q; /* Identify what queue the data belongs to */
 	ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 				GFP_KERNEL, q->node);
 				GFP_KERNEL, q->node);
 	if (!ad->hash) {
 	if (!ad->hash) {
 		kfree(ad);
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}
 	}
 
 
 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	if (!ad->arq_pool) {
 	if (!ad->arq_pool) {
 		kfree(ad->hash);
 		kfree(ad->hash);
 		kfree(ad);
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}
 	}
 
 
 	/* anticipatory scheduling helpers */
 	/* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	ad->antic_expire = default_antic_expire;
 	ad->antic_expire = default_antic_expire;
 	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
 	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
 	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
 	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
-	e->elevator_data = ad;
 
 
 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
 	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
 	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
 	if (ad->write_batch_count < 2)
 	if (ad->write_batch_count < 2)
 		ad->write_batch_count = 2;
 		ad->write_batch_count = 2;
 
 
-	return 0;
+	return ad;
 }
 }
 
 
 /*
 /*

+ 7 - 14
block/cfq-iosched.c

@@ -1323,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
 
 	if (cic) {
 	if (cic) {
-		RB_CLEAR(&cic->rb_node);
-		cic->key = NULL;
-		cic->cfqq[ASYNC] = NULL;
-		cic->cfqq[SYNC] = NULL;
+		memset(cic, 0, sizeof(*cic));
+		RB_CLEAR_COLOR(&cic->rb_node);
 		cic->last_end_request = jiffies;
 		cic->last_end_request = jiffies;
-		cic->ttime_total = 0;
-		cic->ttime_samples = 0;
-		cic->ttime_mean = 0;
+		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
 		cic->exit = cfq_exit_io_context;
-		INIT_LIST_HEAD(&cic->queue_list);
 		atomic_inc(&ioc_count);
 		atomic_inc(&ioc_count);
 	}
 	}
 
 
@@ -2251,14 +2246,14 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 	kfree(cfqd);
 }
 }
 
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 {
 	struct cfq_data *cfqd;
 	struct cfq_data *cfqd;
 	int i;
 	int i;
 
 
 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	if (!cfqd)
 	if (!cfqd)
-		return -ENOMEM;
+		return NULL;
 
 
 	memset(cfqd, 0, sizeof(*cfqd));
 	memset(cfqd, 0, sizeof(*cfqd));
 
 
@@ -2288,8 +2283,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
 
-	e->elevator_data = cfqd;
-
 	cfqd->queue = q;
 	cfqd->queue = q;
 
 
 	cfqd->max_queued = q->nr_requests / 4;
 	cfqd->max_queued = q->nr_requests / 4;
@@ -2316,14 +2309,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
 
-	return 0;
+	return cfqd;
 out_crqpool:
 out_crqpool:
 	kfree(cfqd->cfq_hash);
 	kfree(cfqd->cfq_hash);
 out_cfqhash:
 out_cfqhash:
 	kfree(cfqd->crq_hash);
 	kfree(cfqd->crq_hash);
 out_crqhash:
 out_crqhash:
 	kfree(cfqd);
 	kfree(cfqd);
-	return -ENOMEM;
+	return NULL;
 }
 }
 
 
 static void cfq_slab_kill(void)
 static void cfq_slab_kill(void)

+ 6 - 7
block/deadline-iosched.c

@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
  * initialize elevator private data (deadline_data), and alloc a drq for
  * initialize elevator private data (deadline_data), and alloc a drq for
  * each request on the free lists
  * each request on the free lists
  */
  */
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
 {
 	struct deadline_data *dd;
 	struct deadline_data *dd;
 	int i;
 	int i;
 
 
 	if (!drq_pool)
 	if (!drq_pool)
-		return -ENOMEM;
+		return NULL;
 
 
 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd)
 	if (!dd)
-		return -ENOMEM;
+		return NULL;
 	memset(dd, 0, sizeof(*dd));
 	memset(dd, 0, sizeof(*dd));
 
 
 	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
 	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 	if (!dd->hash) {
 		kfree(dd);
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}
 	}
 
 
 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	if (!dd->drq_pool) {
 	if (!dd->drq_pool) {
 		kfree(dd->hash);
 		kfree(dd->hash);
 		kfree(dd);
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}
 	}
 
 
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	dd->writes_starved = writes_starved;
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
 	dd->fifo_batch = fifo_batch;
-	e->elevator_data = dd;
-	return 0;
+	return dd;
 }
 }
 
 
 static void deadline_put_request(request_queue_t *q, struct request *rq)
 static void deadline_put_request(request_queue_t *q, struct request *rq)

+ 34 - 21
block/elevator.c

@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 	return e;
 }
 }
 
 
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
 {
-	int ret = 0;
+	return eq->ops->elevator_init_fn(q, eq);
+}
 
 
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+			   void *data)
+{
 	q->elevator = eq;
 	q->elevator = eq;
-
-	if (eq->ops->elevator_init_fn)
-		ret = eq->ops->elevator_init_fn(q, eq);
-
-	return ret;
+	eq->elevator_data = data;
 }
 }
 
 
 static char chosen_elevator[16];
 static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
 	struct elevator_type *e = NULL;
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
 	struct elevator_queue *eq;
 	int ret = 0;
 	int ret = 0;
+	void *data;
 
 
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
 	if (!eq)
 	if (!eq)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	ret = elevator_attach(q, eq);
-	if (ret)
+	data = elevator_init_queue(q, eq);
+	if (!data) {
 		kobject_put(&eq->kobj);
 		kobject_put(&eq->kobj);
+		return -ENOMEM;
+	}
 
 
+	elevator_attach(q, eq, data);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
 	return error;
 	return error;
 }
 }
 
 
+static void __elv_unregister_queue(elevator_t *e)
+{
+	kobject_uevent(&e->kobj, KOBJ_REMOVE);
+	kobject_del(&e->kobj);
+}
+
 void elv_unregister_queue(struct request_queue *q)
 void elv_unregister_queue(struct request_queue *q)
 {
 {
-	if (q) {
-		elevator_t *e = q->elevator;
-		kobject_uevent(&e->kobj, KOBJ_REMOVE);
-		kobject_del(&e->kobj);
-	}
+	if (q)
+		__elv_unregister_queue(q->elevator);
 }
 }
 
 
 int elv_register(struct elevator_type *e)
 int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
 {
 	elevator_t *old_elevator, *e;
 	elevator_t *old_elevator, *e;
+	void *data;
 
 
 	/*
 	/*
 	 * Allocate new elevator
 	 * Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	if (!e)
 	if (!e)
 		return 0;
 		return 0;
 
 
+	data = elevator_init_queue(q, e);
+	if (!data) {
+		kobject_put(&e->kobj);
+		return 0;
+	}
+
 	/*
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */
 	 */
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 		elv_drain_elevator(q);
 		elv_drain_elevator(q);
 	}
 	}
 
 
-	spin_unlock_irq(q->queue_lock);
-
 	/*
 	/*
-	 * unregister old elevator data
+	 * Remember old elevator.
 	 */
 	 */
-	elv_unregister_queue(q);
 	old_elevator = q->elevator;
 	old_elevator = q->elevator;
 
 
 	/*
 	/*
 	 * attach and start new elevator
 	 * attach and start new elevator
 	 */
 	 */
-	if (elevator_attach(q, e))
-		goto fail;
+	elevator_attach(q, e, data);
+
+	spin_unlock_irq(q->queue_lock);
+
+	__elv_unregister_queue(old_elevator);
 
 
 	if (elv_register_queue(q))
 	if (elv_register_queue(q))
 		goto fail_register;
 		goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
 	 */
 	 */
 	elevator_exit(e);
 	elevator_exit(e);
 	e = NULL;
 	e = NULL;
-fail:
 	q->elevator = old_elevator;
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

+ 3 - 4
block/noop-iosched.c

@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 }
 
 
-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 {
 	struct noop_data *nd;
 	struct noop_data *nd;
 
 
 	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
 	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
 	if (!nd)
 	if (!nd)
-		return -ENOMEM;
+		return NULL;
 	INIT_LIST_HEAD(&nd->queue);
 	INIT_LIST_HEAD(&nd->queue);
-	e->elevator_data = nd;
-	return 0;
+	return nd;
 }
 }
 
 
 static void noop_exit_queue(elevator_t *e)
 static void noop_exit_queue(elevator_t *e)

+ 4 - 1
drivers/acpi/processor_perflib.c

@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
 		return_VALUE(-EBUSY);
 		return_VALUE(-EBUSY);
 	}
 	}
 
 
+	WARN_ON(!performance);
+
 	pr->performance = performance;
 	pr->performance = performance;
 
 
 	if (acpi_processor_get_performance_info(pr)) {
 	if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
 		return_VOID;
 		return_VOID;
 	}
 	}
 
 
-	kfree(pr->performance->states);
+	if (pr->performance)
+		kfree(pr->performance->states);
 	pr->performance = NULL;
 	pr->performance = NULL;
 
 
 	acpi_cpufreq_remove_file(pr);
 	acpi_cpufreq_remove_file(pr);

+ 4 - 2
drivers/cdrom/cdrom.c

@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 		if (fp->f_mode & FMODE_WRITE) {
 		if (fp->f_mode & FMODE_WRITE) {
 			ret = -EROFS;
 			ret = -EROFS;
 			if (cdrom_open_write(cdi))
 			if (cdrom_open_write(cdi))
-				goto err;
+				goto err_release;
 			if (!CDROM_CAN(CDC_RAM))
 			if (!CDROM_CAN(CDC_RAM))
-				goto err;
+				goto err_release;
 			ret = 0;
 			ret = 0;
 			cdi->media_written = 0;
 			cdi->media_written = 0;
 		}
 		}
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 	    not be mounting, but opening with O_NONBLOCK */
 	    not be mounting, but opening with O_NONBLOCK */
 	check_disk_change(ip->i_bdev);
 	check_disk_change(ip->i_bdev);
 	return 0;
 	return 0;
+err_release:
+	cdi->ops->release(cdi);
 err:
 err:
 	cdi->use_count--;
 	cdi->use_count--;
 	return ret;
 	return ret;
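
The new err_release label keeps the unwind layered: each label undoes exactly one
earlier step, and later failures jump to later labels, so a write-open that fails
after the low-level open no longer leaves the device opened.  A reduced sketch of
the shape, with hypothetical demo_* names standing in for the cdrom specifics:

	struct demo_dev {
		int use_count;
	};

	int demo_hw_open(struct demo_dev *d);
	void demo_hw_release(struct demo_dev *d);
	int demo_can_write(struct demo_dev *d);

	static int demo_open(struct demo_dev *d, int want_write)
	{
		int ret;

		d->use_count++;				/* step 1: take a reference */

		ret = demo_hw_open(d);			/* step 2: open the hardware */
		if (ret)
			goto err;			/* only step 1 to undo */

		if (want_write) {
			ret = -EROFS;
			if (!demo_can_write(d))
				goto err_release;	/* undo step 2, then step 1 */
		}
		return 0;

	err_release:
		demo_hw_release(d);
	err:
		d->use_count--;
		return ret;
	}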

+ 1 - 1
drivers/char/Makefile

@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
-obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
+obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o

+ 3 - 1
drivers/char/n_tty.c

@@ -1384,8 +1384,10 @@ do_it_again:
 		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
 		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
 		 * we won't get any more characters.
 		 * we won't get any more characters.
 		 */
 		 */
-		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+			n_tty_set_room(tty);
 			check_unthrottle(tty);
 			check_unthrottle(tty);
+		}
 
 
 		if (b - buf >= minimum)
 		if (b - buf >= minimum)
 			break;
 			break;

+ 2 - 0
drivers/message/fusion/mptspi.c

@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 	return rc;
 	return rc;
 }
 }
 
 
+#ifdef CONFIG_PM
 /*
 /*
  * spi module resume handler
  * spi module resume handler
  */
  */
@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
 
 
 	return rc;
 	return rc;
 }
 }
+#endif
 
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

+ 37 - 35
drivers/message/i2o/exec-osm.c

@@ -55,6 +55,7 @@ struct i2o_exec_wait {
 	u32 m;			/* message id */
 	u32 m;			/* message id */
 	struct i2o_message *msg;	/* pointer to the reply message */
 	struct i2o_message *msg;	/* pointer to the reply message */
 	struct list_head list;	/* node in global wait list */
 	struct list_head list;	/* node in global wait list */
+	spinlock_t lock;	/* lock before modifying */
 };
 };
 
 
 /* Work struct needed to handle LCT NOTIFY replies */
 /* Work struct needed to handle LCT NOTIFY replies */
@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
 		return NULL;
 		return NULL;
 
 
 	INIT_LIST_HEAD(&wait->list);
 	INIT_LIST_HEAD(&wait->list);
+	spin_lock_init(&wait->lock);
 
 
 	return wait;
 	return wait;
 };
 };
@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 	DECLARE_WAIT_QUEUE_HEAD(wq);
 	DECLARE_WAIT_QUEUE_HEAD(wq);
 	struct i2o_exec_wait *wait;
 	struct i2o_exec_wait *wait;
 	static u32 tcntxt = 0x80000000;
 	static u32 tcntxt = 0x80000000;
+	unsigned long flags;
 	int rc = 0;
 	int rc = 0;
 
 
 	wait = i2o_exec_wait_alloc();
 	wait = i2o_exec_wait_alloc();
@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 	wait->tcntxt = tcntxt++;
 	wait->tcntxt = tcntxt++;
 	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
 	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
 
 
+	wait->wq = &wq;
+	/*
+	 * we add elements to the head, because if an entry in the list will
+	 * never be removed, we have to iterate over it every time
+	 */
+	list_add(&wait->list, &i2o_exec_wait_list);
+
 	/*
 	/*
 	 * Post the message to the controller. At some point later it will
 	 * Post the message to the controller. At some point later it will
 	 * return. If we time out before it returns then complete will be zero.
 	 * return. If we time out before it returns then complete will be zero.
 	 */
 	 */
 	i2o_msg_post(c, msg);
 	i2o_msg_post(c, msg);
 
 
-	if (!wait->complete) {
-		wait->wq = &wq;
-		/*
-		 * we add elements add the head, because if a entry in the list
-		 * will never be removed, we have to iterate over it every time
-		 */
-		list_add(&wait->list, &i2o_exec_wait_list);
-
-		wait_event_interruptible_timeout(wq, wait->complete,
-						 timeout * HZ);
+	wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
 
 
-		wait->wq = NULL;
-	}
+	spin_lock_irqsave(&wait->lock, flags);
 
 
-	barrier();
+	wait->wq = NULL;
 
 
-	if (wait->complete) {
+	if (wait->complete)
 		rc = le32_to_cpu(wait->msg->body[0]) >> 24;
 		rc = le32_to_cpu(wait->msg->body[0]) >> 24;
-		i2o_flush_reply(c, wait->m);
-		i2o_exec_wait_free(wait);
-	} else {
+	else {
 		/*
 		/*
 		 * We cannot remove it now. This is important. When it does
 		 * We cannot remove it now. This is important. When it does
 		 * terminate (which it must do if the controller has not
 		 * terminate (which it must do if the controller has not
@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 		rc = -ETIMEDOUT;
 		rc = -ETIMEDOUT;
 	}
 	}
 
 
+	spin_unlock_irqrestore(&wait->lock, flags);
+
+	if (rc != -ETIMEDOUT) {
+		i2o_flush_reply(c, wait->m);
+		i2o_exec_wait_free(wait);
+	}
+
 	return rc;
 	return rc;
 };
 };
 
 
@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 {
 {
 	struct i2o_exec_wait *wait, *tmp;
 	struct i2o_exec_wait *wait, *tmp;
 	unsigned long flags;
 	unsigned long flags;
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
 	int rc = 1;
 	int rc = 1;
 
 
 	/*
 	/*
@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 	 * already expired. Not much we can do about that except log it for
 	 * already expired. Not much we can do about that except log it for
 	 * debug purposes, increase timeout, and recompile.
 	 * debug purposes, increase timeout, and recompile.
 	 */
 	 */
-	spin_lock_irqsave(&lock, flags);
 	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
 	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
 		if (wait->tcntxt == context) {
 		if (wait->tcntxt == context) {
-			list_del(&wait->list);
+			spin_lock_irqsave(&wait->lock, flags);
 
 
-			spin_unlock_irqrestore(&lock, flags);
+			list_del(&wait->list);
 
 
 			wait->m = m;
 			wait->m = m;
 			wait->msg = msg;
 			wait->msg = msg;
 			wait->complete = 1;
 			wait->complete = 1;
 
 
-			barrier();
-
-			if (wait->wq) {
-				wake_up_interruptible(wait->wq);
+			if (wait->wq)
 				rc = 0;
 				rc = 0;
-			} else {
+			else
+				rc = -1;
+
+			spin_unlock_irqrestore(&wait->lock, flags);
+
+			if (rc) {
 				struct device *dev;
 				struct device *dev;
 
 
 				dev = &c->pdev->dev;
 				dev = &c->pdev->dev;
@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 					 c->name);
 					 c->name);
 				i2o_dma_free(dev, &wait->dma);
 				i2o_dma_free(dev, &wait->dma);
 				i2o_exec_wait_free(wait);
 				i2o_exec_wait_free(wait);
-				rc = -1;
-			}
+			} else
+				wake_up_interruptible(wait->wq);
 
 
 			return rc;
 			return rc;
 		}
 		}
 	}
 	}
 
 
-	spin_unlock_irqrestore(&lock, flags);
-
 	osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
 	osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
 		 context);
 		 context);
 
 
@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
 static int i2o_exec_probe(struct device *dev)
 static int i2o_exec_probe(struct device *dev)
 {
 {
 	struct i2o_device *i2o_dev = to_i2o_device(dev);
 	struct i2o_device *i2o_dev = to_i2o_device(dev);
-	struct i2o_controller *c = i2o_dev->iop;
 
 
 	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
 	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
 
 
-	c->exec = i2o_dev;
-
-	i2o_exec_lct_notify(c, c->lct->change_ind + 1);
-
 	device_create_file(dev, &dev_attr_vendor_id);
 	device_create_file(dev, &dev_attr_vendor_id);
 	device_create_file(dev, &dev_attr_product_id);
 	device_create_file(dev, &dev_attr_product_id);
 
 
@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 	struct device *dev;
 	struct device *dev;
 	struct i2o_message *msg;
 	struct i2o_message *msg;
 
 
+	down(&c->lct_lock);
+
 	dev = &c->pdev->dev;
 	dev = &c->pdev->dev;
 
 
 	if (i2o_dma_realloc
 	if (i2o_dma_realloc
@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 
 
 	i2o_msg_post(c, msg);
 	i2o_msg_post(c, msg);
 
 
+	up(&c->lct_lock);
+
 	return 0;
 	return 0;
 };
 };
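
The per-wait spinlock replaces the old global lock plus barrier() scheme: the
timed-out waiter and the reply handler both take wait->lock, so exactly one side
observes the other's state and becomes responsible for freeing the wait block and
flushing the reply.  A reduced model of the reply-side decision, using illustrative
demo_* names rather than the driver's real structures:

	struct demo_wait {
		spinlock_t lock;
		wait_queue_head_t *wq;	/* non-NULL while a waiter is still around */
		int complete;
	};

	/* Returns 0 if a waiter will consume the reply; -1 if it already timed
	 * out, in which case the caller must clean the reply up itself. */
	static int demo_complete(struct demo_wait *w)
	{
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&w->lock, flags);
		w->complete = 1;
		if (w->wq) {
			/* waking under the lock keeps the waiter from freeing
			 * the wait block underneath us */
			wake_up_interruptible(w->wq);
			rc = 0;
		} else
			rc = -1;
		spin_unlock_irqrestore(&w->lock, flags);

		return rc;
	}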
 
 

+ 1 - 3
drivers/message/i2o/iop.c

@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
 
 
 	/* Ask the IOP to switch to RESET state */
 	/* Ask the IOP to switch to RESET state */
 	i2o_iop_reset(c);
 	i2o_iop_reset(c);
-
-	put_device(&c->device);
 }
 }
 
 
 /**
 /**
@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
 
 
 	snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
 	snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
 	if (i2o_pool_alloc
 	if (i2o_pool_alloc
-	    (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+	    (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
 	     I2O_MSG_INPOOL_MIN)) {
 	     I2O_MSG_INPOOL_MIN)) {
 		kfree(c);
 		kfree(c);
 		return ERR_PTR(-ENOMEM);
 		return ERR_PTR(-ENOMEM);

+ 4 - 1
drivers/net/e1000/e1000_ethtool.c

@@ -870,13 +870,16 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	*data = 0;
 	*data = 0;
 
 
 	/* Hook up test interrupt handler just for this test */
 	/* Hook up test interrupt handler just for this test */
- 	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+	if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
+	                 netdev)) {
  		shared_int = FALSE;
  		shared_int = FALSE;
  	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
  	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			      netdev->name, netdev)){
 			      netdev->name, netdev)){
 		*data = 1;
 		*data = 1;
 		return -1;
 		return -1;
 	}
 	}
+	DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
 
 
 	/* Disable all the interrupts */
 	/* Disable all the interrupts */
 	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
 	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);

+ 2 - 6
drivers/net/e1000/e1000_main.c

@@ -3519,7 +3519,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	buffer_info = &rx_ring->buffer_info[i];
 	buffer_info = &rx_ring->buffer_info[i];
 
 
 	while (rx_desc->status & E1000_RXD_STAT_DD) {
 	while (rx_desc->status & E1000_RXD_STAT_DD) {
-		struct sk_buff *skb, *next_skb;
+		struct sk_buff *skb;
 		u8 status;
 		u8 status;
 #ifdef CONFIG_E1000_NAPI
 #ifdef CONFIG_E1000_NAPI
 		if (*work_done >= work_to_do)
 		if (*work_done >= work_to_do)
@@ -3537,8 +3537,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		prefetch(next_rxd);
 		prefetch(next_rxd);
 
 
 		next_buffer = &rx_ring->buffer_info[i];
 		next_buffer = &rx_ring->buffer_info[i];
-		next_skb = next_buffer->skb;
-		prefetch(next_skb->data - NET_IP_ALIGN);
 
 
 		cleaned = TRUE;
 		cleaned = TRUE;
 		cleaned_count++;
 		cleaned_count++;
@@ -3668,7 +3666,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info, *next_buffer;
 	struct e1000_buffer *buffer_info, *next_buffer;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
 	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb, *next_skb;
+	struct sk_buff *skb;
 	unsigned int i, j;
 	unsigned int i, j;
 	uint32_t length, staterr;
 	uint32_t length, staterr;
 	int cleaned_count = 0;
 	int cleaned_count = 0;
@@ -3697,8 +3695,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		prefetch(next_rxd);
 		prefetch(next_rxd);
 
 
 		next_buffer = &rx_ring->buffer_info[i];
 		next_buffer = &rx_ring->buffer_info[i];
-		next_skb = next_buffer->skb;
-		prefetch(next_skb->data - NET_IP_ALIGN);
 
 
 		cleaned = TRUE;
 		cleaned = TRUE;
 		cleaned_count++;
 		cleaned_count++;

+ 37 - 16
drivers/net/sky2.c

@@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 	return v;
 	return v;
 }
 }
 
 
-static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
+static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 {
 {
 	u16 power_control;
 	u16 power_control;
 	u32 reg1;
 	u32 reg1;
 	int vaux;
 	int vaux;
-	int ret = 0;
 
 
 	pr_debug("sky2_set_power_state %d\n", state);
 	pr_debug("sky2_set_power_state %d\n", state);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 		break;
 		break;
 	default:
 	default:
 		printk(KERN_ERR PFX "Unknown power state %d\n", state);
 		printk(KERN_ERR PFX "Unknown power state %d\n", state);
-		ret = -1;
 	}
 	}
 
 
 	sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
 	sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-	return ret;
 }
 }
 
 
 static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
 static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
@@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
 /* If idle then force a fake soft NAPI poll once a second
 /* If idle then force a fake soft NAPI poll once a second
  * to work around cases where sharing an edge triggered interrupt.
  * to work around cases where sharing an edge triggered interrupt.
  */
  */
+static inline void sky2_idle_start(struct sky2_hw *hw)
+{
+	if (idle_timeout > 0)
+		mod_timer(&hw->idle_timer,
+			  jiffies + msecs_to_jiffies(idle_timeout));
+}
+
 static void sky2_idle(unsigned long arg)
 static void sky2_idle(unsigned long arg)
 {
 {
 	struct sky2_hw *hw = (struct sky2_hw *) arg;
 	struct sky2_hw *hw = (struct sky2_hw *) arg;
@@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	int work_done = 0;
 	int work_done = 0;
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
 
 
+	if (!~status)
+		goto out;
+
 	if (status & Y2_IS_HW_ERR)
 	if (status & Y2_IS_HW_ERR)
 		sky2_hw_intr(hw);
 		sky2_hw_intr(hw);
 
 
@@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 
 
 	if (sky2_more_work(hw))
 	if (sky2_more_work(hw))
 		return 1;
 		return 1;
-
+out:
 	netif_rx_complete(dev0);
 	netif_rx_complete(dev0);
 
 
 	sky2_read32(hw, B0_Y2_SP_LISR);
 	sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2248,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 static void sky2_netpoll(struct net_device *dev)
 static void sky2_netpoll(struct net_device *dev)
 {
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_port *sky2 = netdev_priv(dev);
+	struct net_device *dev0 = sky2->hw->dev[0];
 
 
-	sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
+	if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
+		__netif_rx_schedule(dev0);
 }
 }
 #endif
 #endif
 
 
@@ -3350,9 +3359,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
 
 
 	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
 	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
-	if (idle_timeout > 0)
-		mod_timer(&hw->idle_timer,
-			  jiffies + msecs_to_jiffies(idle_timeout));
+	sky2_idle_start(hw);
 
 
 	pci_set_drvdata(pdev, hw);
 	pci_set_drvdata(pdev, hw);
 
 
@@ -3425,8 +3432,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 {
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
 	int i;
 	int i;
+	pci_power_t pstate = pci_choose_state(pdev, state);
+
+	if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
+		return -EINVAL;
+
+	del_timer_sync(&hw->idle_timer);
 
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 		struct net_device *dev = hw->dev[i];
 
 
 		if (dev) {
 		if (dev) {
@@ -3435,10 +3448,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 
 
 			sky2_down(dev);
 			sky2_down(dev);
 			netif_device_detach(dev);
 			netif_device_detach(dev);
+			netif_poll_disable(dev);
 		}
 		}
 	}
 	}
 
 
-	return sky2_set_power_state(hw, pci_choose_state(pdev, state));
+	sky2_write32(hw, B0_IMSK, 0);
+	pci_save_state(pdev);
+	sky2_set_power_state(hw, pstate);
+	return 0;
 }
 }
 
 
 static int sky2_resume(struct pci_dev *pdev)
 static int sky2_resume(struct pci_dev *pdev)
@@ -3448,27 +3465,31 @@ static int sky2_resume(struct pci_dev *pdev)
 
 
 	pci_restore_state(pdev);
 	pci_restore_state(pdev);
 	pci_enable_wake(pdev, PCI_D0, 0);
 	pci_enable_wake(pdev, PCI_D0, 0);
-	err = sky2_set_power_state(hw, PCI_D0);
-	if (err)
-		goto out;
+	sky2_set_power_state(hw, PCI_D0);
 
 
 	err = sky2_reset(hw);
 	err = sky2_reset(hw);
 	if (err)
 	if (err)
 		goto out;
 		goto out;
 
 
-	for (i = 0; i < 2; i++) {
+	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 		struct net_device *dev = hw->dev[i];
 		if (dev && netif_running(dev)) {
 		if (dev && netif_running(dev)) {
 			netif_device_attach(dev);
 			netif_device_attach(dev);
+			netif_poll_enable(dev);
+
 			err = sky2_up(dev);
 			err = sky2_up(dev);
 			if (err) {
 			if (err) {
 				printk(KERN_ERR PFX "%s: could not up: %d\n",
 				printk(KERN_ERR PFX "%s: could not up: %d\n",
 				       dev->name, err);
 				       dev->name, err);
 				dev_close(dev);
 				dev_close(dev);
-				break;
+				goto out;
 			}
 			}
 		}
 		}
 	}
 	}
+
+	sky2_idle_start(hw);
 out:
 out:
 	return err;
 	return err;
 }
 }

+ 48 - 96
drivers/net/tg3.c

@@ -69,8 +69,8 @@
 
 
 #define DRV_MODULE_NAME		"tg3"
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.58"
-#define DRV_MODULE_RELDATE	"May 22, 2006"
+#define DRV_MODULE_VERSION	"3.59"
+#define DRV_MODULE_RELDATE	"June 8, 2006"
 
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
 #define TG3_DEF_RX_MODE		0
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
 /* tp->lock is held. */
 /* tp->lock is held. */
 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 {
 {
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
-		tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
-			      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 
 
 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
 		switch (kind) {
 		switch (kind) {
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
 	void (*write_op)(struct tg3 *, u32, u32);
 	void (*write_op)(struct tg3 *, u32, u32);
 	int i;
 	int i;
 
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
-		tg3_nvram_lock(tp);
-		/* No matching tg3_nvram_unlock() after this because
-		 * chip reset below will undo the nvram lock.
-		 */
-		tp->nvram_lock_cnt = 0;
-	}
+	tg3_nvram_lock(tp);
+
+	/* No matching tg3_nvram_unlock() after this because
+	 * chip reset below will undo the nvram lock.
+	 */
+	tp->nvram_lock_cnt = 0;
 
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
 		tw32_f(MAC_MODE, 0);
 		tw32_f(MAC_MODE, 0);
 	udelay(40);
 	udelay(40);
 
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
-		/* Wait for firmware initialization to complete. */
-		for (i = 0; i < 100000; i++) {
-			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
-			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
-				break;
-			udelay(10);
-		}
-		if (i >= 100000) {
-			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
-			       "firmware will not restart magic=%08x\n",
-			       tp->dev->name, val);
-			return -ENODEV;
-		}
+	/* Wait for firmware initialization to complete. */
+	for (i = 0; i < 100000; i++) {
+		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+			break;
+		udelay(10);
+	}
+
+	/* Chip might not be fitted with firmware.  Some Sun onboard
+	 * parts are configured like that.  So don't signal the timeout
+	 * of the above loop as an error, but do report the lack of
+	 * running firmware once.
+	 */
+	if (i >= 100000 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+		printk(KERN_INFO PFX "%s: No firmware running.\n",
+		       tp->dev->name);
 	}
 	}
 
 
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 {
 {
 	int j;
 	int j;
 
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
-		return;
-
 	tw32_f(GRC_EEPROM_ADDR,
 	tw32_f(GRC_EEPROM_ADDR,
 	     (EEPROM_ADDR_FSM_RESET |
 	     (EEPROM_ADDR_FSM_RESET |
 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 {
 {
 	int ret;
 	int ret;
 
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
-		return -EINVAL;
-	}
-
 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
 		return tg3_nvram_read_using_eeprom(tp, offset, val);
 		return tg3_nvram_read_using_eeprom(tp, offset, val);
 
 
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 {
 {
 	int ret;
 	int ret;
 
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
-		return -EINVAL;
-	}
-
 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 			       tp->misc_host_ctrl);
 			       tp->misc_host_ctrl);
 
 
+	/* The memory arbiter has to be enabled in order for SRAM accesses
+	 * to succeed.  Normally on powerup the tg3 chip firmware will make
+	 * sure it is enabled, but other entities such as system netboot
+	 * code might disable it.
+	 */
+	val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
 	tp->phy_id = PHY_ID_INVALID;
 	tp->phy_id = PHY_ID_INVALID;
 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 
 
-	/* Do not even try poking around in here on Sun parts.  */
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		/* All SUN chips are built-in LOMs. */
-		tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
-		return;
-	}
+	/* Assume an onboard device by default.  */
+	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 
 
 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
 
 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+		else
+			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
 
 
 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 	int i;
 	int i;
 	u32 magic;
 	u32 magic;
 
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		/* Sun decided not to put the necessary bits in the
-		 * NVRAM of their onboard tg3 parts :(
-		 */
-		strcpy(tp->board_part_number, "Sun 570X");
-		return;
-	}
-
 	if (tg3_nvram_read_swab(tp, 0x0, &magic))
 	if (tg3_nvram_read_swab(tp, 0x0, &magic))
-		return;
+		goto out_not_found;
 
 
 	if (magic == TG3_EEPROM_MAGIC) {
 	if (magic == TG3_EEPROM_MAGIC) {
 		for (i = 0; i < 256; i += 4) {
 		for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 					break;
 					break;
 				msleep(1);
 				msleep(1);
 			}
 			}
+			if (!(tmp16 & 0x8000))
+				goto out_not_found;
+
 			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
 			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
 					      &tmp);
 					      &tmp);
 			tmp = cpu_to_le32(tmp);
 			tmp = cpu_to_le32(tmp);
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
 	}
 	}
 }
 }
 
 
-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
-{
-	struct pci_dev *pdev = tp->pdev;
-	struct pcidev_cookie *pcp = pdev->sysdata;
-
-	if (pcp != NULL) {
-		int node = pcp->prom_node;
-		u32 venid;
-		int err;
-
-		err = prom_getproperty(node, "subsystem-vendor-id",
-				       (char *) &venid, sizeof(venid));
-		if (err == 0 || err == -1)
-			return 0;
-		if (venid == PCI_VENDOR_ID_SUN)
-			return 1;
-
-		/* TG3 chips onboard the SunBlade-2500 don't have the
-		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
-		 * are distinguishable from non-Sun variants by being
-		 * named "network" by the firmware.  Non-Sun cards will
-		 * show up as being named "ethernet".
-		 */
-		if (!strcmp(pcp->prom_name, "network"))
-			return 1;
-	}
-	return 0;
-}
-#endif
-
 static int __devinit tg3_get_invariants(struct tg3 *tp)
 static int __devinit tg3_get_invariants(struct tg3 *tp)
 {
 {
 	static struct pci_device_id write_reorder_chipsets[] = {
 	static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	u16 pci_cmd;
 	u16 pci_cmd;
 	int err;
 	int err;
 
 
-#ifdef CONFIG_SPARC64
-	if (tg3_is_sun_570X(tp))
-		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
-#endif
-
 	/* Force memory write invalidate off.  If we leave it on,
 	/* Force memory write invalidate off.  If we leave it on,
 	 * then on 5700_BX chips we have to enable a workaround.
 	 * then on 5700_BX chips we have to enable a workaround.
 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->write32 == tg3_write_indirect_reg32 ||
 	if (tp->write32 == tg3_write_indirect_reg32 ||
 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
-	    (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 
 
 	/* Get eeprom hw config before calling tg3_set_power_state().
 	/* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 #endif
 #endif
 
 
 	mac_offset = 0x7c;
 	mac_offset = 0x7c;
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-	     !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 			mac_offset = 0xcc;
 			mac_offset = 0xcc;
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	}
 	}
 	if (!addr_ok) {
 	if (!addr_ok) {
 		/* Next, try NVRAM. */
 		/* Next, try NVRAM. */
-		if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
-		    !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
 		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
 		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
 			dev->dev_addr[0] = ((hi >> 16) & 0xff);
 			dev->dev_addr[0] = ((hi >> 16) & 0xff);
 			dev->dev_addr[1] = ((hi >> 24) & 0xff);
 			dev->dev_addr[1] = ((hi >> 24) & 0xff);

+ 2 - 1
drivers/net/tg3.h

@@ -2184,7 +2184,7 @@ struct tg3 {
 #define TG3_FLAG_INIT_COMPLETE		0x80000000
 #define TG3_FLAG_INIT_COMPLETE		0x80000000
 	u32				tg3_flags2;
 	u32				tg3_flags2;
 #define TG3_FLG2_RESTART_TIMER		0x00000001
 #define TG3_FLG2_RESTART_TIMER		0x00000001
-#define TG3_FLG2_SUN_570X		0x00000002
+/*					0x00000002 available */
 #define TG3_FLG2_NO_ETH_WIRE_SPEED	0x00000004
 #define TG3_FLG2_NO_ETH_WIRE_SPEED	0x00000004
 #define TG3_FLG2_IS_5788		0x00000008
 #define TG3_FLG2_IS_5788		0x00000008
 #define TG3_FLG2_MAX_RXPEND_64		0x00000010
 #define TG3_FLG2_MAX_RXPEND_64		0x00000010
@@ -2216,6 +2216,7 @@ struct tg3 {
 #define TG3_FLG2_HW_TSO			(TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
 #define TG3_FLG2_HW_TSO			(TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
 #define TG3_FLG2_1SHOT_MSI		0x10000000
 #define TG3_FLG2_1SHOT_MSI		0x10000000
 #define TG3_FLG2_PHY_JITTER_BUG		0x20000000
 #define TG3_FLG2_PHY_JITTER_BUG		0x20000000
+#define TG3_FLG2_NO_FWARE_REPORTED	0x40000000
 
 
 	u32				split_mode_max_reqs;
 	u32				split_mode_max_reqs;
 #define SPLIT_MODE_5704_MAX_REQ		3
 #define SPLIT_MODE_5704_MAX_REQ		3

+ 22 - 9
drivers/net/wireless/bcm43xx/bcm43xx_dma.c

@@ -624,25 +624,28 @@ err_destroy_tx0:
 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 			   int slot)
 			   int slot)
 {
 {
-	u16 cookie = 0x0000;
+	u16 cookie = 0xF000;
 
 
 	/* Use the upper 4 bits of the cookie as
 	/* Use the upper 4 bits of the cookie as
 	 * DMA controller ID and store the slot number
 	 * DMA controller ID and store the slot number
-	 * in the lower 12 bits
+	 * in the lower 12 bits.
+	 * Note that the cookie must never be 0, as this
+	 * is a special value used in RX path.
 	 */
 	 */
 	switch (ring->mmio_base) {
 	switch (ring->mmio_base) {
 	default:
 	default:
 		assert(0);
 		assert(0);
 	case BCM43xx_MMIO_DMA1_BASE:
 	case BCM43xx_MMIO_DMA1_BASE:
+		cookie = 0xA000;
 		break;
 		break;
 	case BCM43xx_MMIO_DMA2_BASE:
 	case BCM43xx_MMIO_DMA2_BASE:
-		cookie = 0x1000;
+		cookie = 0xB000;
 		break;
 		break;
 	case BCM43xx_MMIO_DMA3_BASE:
 	case BCM43xx_MMIO_DMA3_BASE:
-		cookie = 0x2000;
+		cookie = 0xC000;
 		break;
 		break;
 	case BCM43xx_MMIO_DMA4_BASE:
 	case BCM43xx_MMIO_DMA4_BASE:
-		cookie = 0x3000;
+		cookie = 0xD000;
 		break;
 		break;
 	}
 	}
 	assert(((u16)slot & 0xF000) == 0x0000);
 	assert(((u16)slot & 0xF000) == 0x0000);
@@ -660,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 	struct bcm43xx_dmaring *ring = NULL;
 	struct bcm43xx_dmaring *ring = NULL;
 
 
 	switch (cookie & 0xF000) {
 	switch (cookie & 0xF000) {
-	case 0x0000:
+	case 0xA000:
 		ring = dma->tx_ring0;
 		ring = dma->tx_ring0;
 		break;
 		break;
-	case 0x1000:
+	case 0xB000:
 		ring = dma->tx_ring1;
 		ring = dma->tx_ring1;
 		break;
 		break;
-	case 0x2000:
+	case 0xC000:
 		ring = dma->tx_ring2;
 		ring = dma->tx_ring2;
 		break;
 		break;
-	case 0x3000:
+	case 0xD000:
 		ring = dma->tx_ring3;
 		ring = dma->tx_ring3;
 		break;
 		break;
 	default:
 	default:
@@ -839,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 		/* We received an xmit status. */
 		/* We received an xmit status. */
 		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
 		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
 		struct bcm43xx_xmitstatus stat;
 		struct bcm43xx_xmitstatus stat;
+		int i = 0;
 
 
 		stat.cookie = le16_to_cpu(hw->cookie);
 		stat.cookie = le16_to_cpu(hw->cookie);
+		while (stat.cookie == 0) {
+			if (unlikely(++i >= 10000)) {
+				assert(0);
+				break;
+			}
+			udelay(2);
+			barrier();
+			stat.cookie = le16_to_cpu(hw->cookie);
+		}
 		stat.flags = hw->flags;
 		stat.flags = hw->flags;
 		stat.cnt1 = hw->cnt1;
 		stat.cnt1 = hw->cnt1;
 		stat.cnt2 = hw->cnt2;
 		stat.cnt2 = hw->cnt2;
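
The cookie is now self-describing: the top nibble identifies the TX ring (0xA-0xD),
the low twelve bits carry the slot number, and 0x0000 stays reserved so the RX
status path can tell "not written by the device yet" apart from a real descriptor.
A standalone model of the encoding, with hypothetical demo_* names:

	/* ring: 0..3, slot: 0..4095; never yields 0x0000 */
	static u16 demo_make_cookie(unsigned int ring, unsigned int slot)
	{
		BUG_ON(ring > 3 || slot > 0x0FFF);
		return ((0xA + ring) << 12) | slot;
	}

	static void demo_parse_cookie(u16 cookie, unsigned int *ring, unsigned int *slot)
	{
		BUG_ON((cookie & 0xF000) < 0xA000 || (cookie & 0xF000) > 0xD000);
		*ring = (cookie >> 12) - 0xA;
		*slot = cookie & 0x0FFF;
	}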

+ 8 - 5
drivers/pci/pci-driver.c

@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
  * Default resume method for devices that have no driver provided resume,
  * Default resume method for devices that have no driver provided resume,
  * or not even a driver at all.
  * or not even a driver at all.
  */
  */
-static void pci_default_resume(struct pci_dev *pci_dev)
+static int pci_default_resume(struct pci_dev *pci_dev)
 {
 {
-	int retval;
+	int retval = 0;
 
 
 	/* restore the PCI config space */
 	/* restore the PCI config space */
 	pci_restore_state(pci_dev);
 	pci_restore_state(pci_dev);
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
 	/* if the device was busmaster before the suspend, make it busmaster again */
 	/* if the device was busmaster before the suspend, make it busmaster again */
 	if (pci_dev->is_busmaster)
 	if (pci_dev->is_busmaster)
 		pci_set_master(pci_dev);
 		pci_set_master(pci_dev);
+
+	return retval;
 }
 }
 
 
 static int pci_device_resume(struct device * dev)
 static int pci_device_resume(struct device * dev)
 {
 {
+	int error;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 	struct pci_driver * drv = pci_dev->driver;
 
 
 	if (drv && drv->resume)
 	if (drv && drv->resume)
-		drv->resume(pci_dev);
+		error = drv->resume(pci_dev);
 	else
 	else
-		pci_default_resume(pci_dev);
-	return 0;
+		error = pci_default_resume(pci_dev);
+	return error;
 }
 }
 
 
 static void pci_device_shutdown(struct device *dev)
 static void pci_device_shutdown(struct device *dev)

+ 16 - 2
drivers/pci/pci.c

@@ -461,9 +461,23 @@ int
 pci_restore_state(struct pci_dev *dev)
 pci_restore_state(struct pci_dev *dev)
 {
 {
 	int i;
 	int i;
+	u32 val;
 
 
-	for (i = 0; i < 16; i++)
-		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+	/*
+	 * The Base Address register should be programmed before the command
+	 * register(s)
+	 */
+	for (i = 15; i >= 0; i--) {
+		pci_read_config_dword(dev, i * 4, &val);
+		if (val != dev->saved_config_space[i]) {
+			printk(KERN_DEBUG "PM: Writing back config space on "
+				"device %s at offset %x (was %x, writing %x)\n",
+				pci_name(dev), i,
+				val, (int)dev->saved_config_space[i]);
+			pci_write_config_dword(dev,i * 4,
+				dev->saved_config_space[i]);
+		}
+	}
 	pci_restore_msi_state(dev);
 	pci_restore_msi_state(dev);
 	pci_restore_msix_state(dev);
 	pci_restore_msix_state(dev);
 	return 0;
 	return 0;
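
The loop now walks the saved configuration space from the top down, so the BARs
and other resources are rewritten before the command register at offset 0x04 can
re-enable memory and I/O decode, and dwords that already match are skipped to
avoid needless config writes.  From a driver's perspective this is still the usual
save/restore pairing; a hedged sketch of that usage with illustrative foo_* names:

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		/* quiesce the device first ... */
		pci_save_state(pdev);		/* snapshot config space, BARs included */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

	static int foo_resume(struct pci_dev *pdev)
	{
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);	/* BARs written back before decode is re-enabled */
		return pci_enable_device(pdev);
	}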

+ 3 - 0
drivers/scsi/sata_mv.c

@@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap)
 static void mv_eng_timeout(struct ata_port *ap)
 static void mv_eng_timeout(struct ata_port *ap)
 {
 {
 	struct ata_queued_cmd *qc;
 	struct ata_queued_cmd *qc;
+	unsigned long flags;
 
 
 	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
 	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
 	DPRINTK("All regs @ start of eng_timeout\n");
 	DPRINTK("All regs @ start of eng_timeout\n");
@@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap)
 	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
 	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
 	       &qc->scsicmd->cmnd);
 	       &qc->scsicmd->cmnd);
 
 
+	spin_lock_irqsave(&ap->host_set->lock, flags);
 	mv_err_intr(ap, 0);
 	mv_err_intr(ap, 0);
 	mv_stop_and_reset(ap);
 	mv_stop_and_reset(ap);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
 
 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
 	if (qc->flags & ATA_QCFLAG_ACTIVE) {

+ 3 - 0
drivers/usb/host/ohci-pxa27x.c

@@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
 	/* Select Power Management Mode */
 	/* Select Power Management Mode */
 	pxa27x_ohci_select_pmm(inf->port_mode);
 	pxa27x_ohci_select_pmm(inf->port_mode);
 
 
+	if (inf->power_budget)
+		hcd->power_budget = inf->power_budget;
+
 	ohci_hcd_init(hcd_to_ohci(hcd));
 	ohci_hcd_init(hcd_to_ohci(hcd));
 
 
 	retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);
 	retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);

+ 1 - 1
drivers/video/console/fbcon.c

@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 					fbcon_redraw_move(vc, p, 0, t, count);
 					fbcon_redraw_move(vc, p, 0, t, count);
 				ypan_up_redraw(vc, t, count);
 				ypan_up_redraw(vc, t, count);
 				if (vc->vc_rows - b > 0)
 				if (vc->vc_rows - b > 0)
-					fbcon_redraw_move(vc, p, b - count,
+					fbcon_redraw_move(vc, p, b,
 							  vc->vc_rows - b, b);
 							  vc->vc_rows - b, b);
 			} else
 			} else
 				fbcon_redraw_move(vc, p, t + count, b - t - count, t);
 				fbcon_redraw_move(vc, p, t + count, b - t - count, t);

+ 3 - 2
fs/bio.c

@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 				     write_to_vm, 0, &pages[cur_page], NULL);
 				     write_to_vm, 0, &pages[cur_page], NULL);
 		up_read(&current->mm->mmap_sem);
 		up_read(&current->mm->mmap_sem);
 
 
-		if (ret < local_nr_pages)
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
 			goto out_unmap;
 			goto out_unmap;
-
+		}
 
 
 		offset = uaddr & ~PAGE_MASK;
 		offset = uaddr & ~PAGE_MASK;
 		for (j = cur_page; j < page_limit; j++) {
 		for (j = cur_page; j < page_limit; j++) {

+ 2 - 1
fs/debugfs/inode.c

@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
 			 int mode, dev_t dev)
 			 int mode, dev_t dev)
 {
 {
-	struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -EPERM;
 	int error = -EPERM;
 
 
 	if (dentry->d_inode)
 	if (dentry->d_inode)
 		return -EEXIST;
 		return -EEXIST;
 
 
+	inode = debugfs_get_inode(dir->i_sb, mode, dev);
 	if (inode) {
 	if (inode) {
 		d_instantiate(dentry, inode);
 		d_instantiate(dentry, inode);
 		dget(dentry);
 		dget(dentry);
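
Moving the debugfs_get_inode() call below the d_inode check means the -EEXIST
early return no longer leaks a freshly allocated inode; in general the cheap
precondition checks come first so the failure paths above the allocation have
nothing to release.  A reduced sketch of the resulting shape (demo_* names are
illustrative):

	struct inode *demo_get_inode(struct super_block *sb, int mode);

	static int demo_mknod(struct inode *dir, struct dentry *dentry, int mode)
	{
		struct inode *inode;

		if (dentry->d_inode)
			return -EEXIST;		/* nothing allocated yet, nothing to free */

		inode = demo_get_inode(dir->i_sb, mode);
		if (!inode)
			return -EPERM;

		d_instantiate(dentry, inode);
		dget(dentry);
		return 0;
	}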

+ 2 - 0
fs/locks.c

@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (request->fl_type == F_UNLCK)
 	if (request->fl_type == F_UNLCK)
 		goto out;
 		goto out;
 
 
+	error = -ENOMEM;
 	new_fl = locks_alloc_lock();
 	new_fl = locks_alloc_lock();
 	if (new_fl == NULL)
 	if (new_fl == NULL)
 		goto out;
 		goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	locks_copy_lock(new_fl, request);
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(&inode->i_flock, new_fl);
 	locks_insert_lock(&inode->i_flock, new_fl);
 	new_fl = NULL;
 	new_fl = NULL;
+	error = 0;
 
 
 out:
 out:
 	unlock_kernel();
 	unlock_kernel();
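
The two added lines make the error code track the control flow: error is set to
-ENOMEM before the allocation, so a failed locks_alloc_lock() no longer falls out
through "goto out" with whatever value error happened to hold, and it is cleared
to zero only once the lock has actually been inserted.  A reduced sketch of the
idiom with illustrative demo_* names:

	struct demo_node {
		struct list_head link;
		int key;
	};

	static struct demo_node *demo_lookup(struct list_head *head, int key)
	{
		struct demo_node *n;

		list_for_each_entry(n, head, link)
			if (n->key == key)
				return n;
		return NULL;
	}

	static int demo_insert(struct list_head *head, int key)
	{
		struct demo_node *new;
		int error;

		error = -ENOMEM;
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			goto out;			/* error already says why */

		error = -EEXIST;
		if (demo_lookup(head, key))
			goto out_free;

		new->key = key;
		list_add(&new->link, head);
		new = NULL;				/* now owned by the list */
		error = 0;				/* only a linked node counts as success */
	out_free:
		kfree(new);				/* kfree(NULL) is a no-op */
	out:
		return error;
	}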

+ 2 - 0
include/asm-arm/arch-pxa/ohci.h

@@ -11,6 +11,8 @@ struct pxaohci_platform_data {
 #define PMM_NPS_MODE           1
 #define PMM_NPS_MODE           1
 #define PMM_GLOBAL_MODE        2
 #define PMM_GLOBAL_MODE        2
 #define PMM_PERPORT_MODE       3
 #define PMM_PERPORT_MODE       3
+
+	int power_budget;
 };
 };
 
 
 extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
 extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
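
With the new field, board support code can describe how much VBUS current the
root ports may draw, and the PXA OHCI glue above passes it through to
hcd->power_budget.  A hedged sketch of the board-side usage -- the demo_* names
and the 500 mA figure are illustrative only:

	static struct pxaohci_platform_data demo_ohci_info = {
		.power_budget	= 500,		/* mA available across the root ports */
	};

	static void __init demo_board_init(void)
	{
		pxa_set_ohci_info(&demo_ohci_info);
	}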

+ 1 - 1
include/asm-powerpc/cputable.h

@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO)
+	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 #endif
 #endif

+ 8 - 7
include/asm-s390/futex.h

@@ -11,23 +11,24 @@
 #define __futex_atomic_fixup \
 #define __futex_atomic_fixup \
 		     ".section __ex_table,\"a\"\n"			\
 		     ".section __ex_table,\"a\"\n"			\
 		     "   .align 4\n"					\
 		     "   .align 4\n"					\
-		     "   .long  0b,2b,1b,2b\n"				\
+		     "   .long  0b,4b,2b,4b,3b,4b\n"			\
 		     ".previous"
 		     ".previous"
 #else /* __s390x__ */
 #else /* __s390x__ */
 #define __futex_atomic_fixup \
 #define __futex_atomic_fixup \
 		     ".section __ex_table,\"a\"\n"			\
 		     ".section __ex_table,\"a\"\n"			\
 		     "   .align 8\n"					\
 		     "   .align 8\n"					\
-		     "   .quad  0b,2b,1b,2b\n"				\
+		     "   .quad  0b,4b,2b,4b,3b,4b\n"			\
 		     ".previous"
 		     ".previous"
 #endif /* __s390x__ */
 #endif /* __s390x__ */
 
 
 #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
 #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
-	asm volatile("   l   %1,0(%6)\n"				\
-		     "0: " insn						\
-		     "   cs  %1,%2,0(%6)\n"				\
-		     "1: jl  0b\n"					\
+	asm volatile("   sacf 256\n"					\
+		     "0: l   %1,0(%6)\n"				\
+		     "1: " insn						\
+		     "2: cs  %1,%2,0(%6)\n"				\
+		     "3: jl  1b\n"					\
 		     "   lhi %0,0\n"					\
 		     "   lhi %0,0\n"					\
-		     "2:\n"						\
+		     "4: sacf 0\n"					\
 		     __futex_atomic_fixup				\
 		     __futex_atomic_fixup				\
 		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
 		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
 		       "=m" (*uaddr)					\
 		       "=m" (*uaddr)					\

+ 1 - 1
include/linux/elevator.h

@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
 
-typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
 typedef void (elevator_exit_fn) (elevator_t *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 
 struct elevator_ops
 struct elevator_ops

+ 4 - 1
include/linux/i2o.h

@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
 
 
 	mmsg->mfa = readl(c->in_port);
 	mmsg->mfa = readl(c->in_port);
 	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
 	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+		u32 mfa = mmsg->mfa;
+
 		mempool_free(mmsg, c->in_msg.mempool);
 		mempool_free(mmsg, c->in_msg.mempool);
-		if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+		if (mfa == I2O_QUEUE_EMPTY)
 			return ERR_PTR(-EBUSY);
 			return ERR_PTR(-EBUSY);
 		return ERR_PTR(-EFAULT);
 		return ERR_PTR(-EFAULT);
 	}
 	}

+ 1 - 0
include/linux/mempolicy.h

@@ -36,6 +36,7 @@
 #include <linux/nodemask.h>
 #include <linux/nodemask.h>
 
 
 struct vm_area_struct;
 struct vm_area_struct;
+struct mm_struct;
 
 
 #ifdef CONFIG_NUMA
 #ifdef CONFIG_NUMA
 
 

+ 1 - 1
include/linux/pci-acpi.h

@@ -50,7 +50,7 @@
 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
 extern acpi_status pci_osc_support_set(u32 flags);
 extern acpi_status pci_osc_support_set(u32 flags);
 #else
 #else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
 typedef u32 		acpi_status;
 typedef u32 		acpi_status;
 #define AE_ERROR      	(acpi_status) (0x0001)
 #define AE_ERROR      	(acpi_status) (0x0001)
 #endif    
 #endif    
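
The old guard could never work: #if !defined() only sees preprocessor macros, and
acpi_status is a typedef, invisible to cpp, so the fallback definitions were
emitted even when the real ACPI headers were present.  Testing AE_ERROR works
because it is a macro those headers define.  A tiny standalone illustration:

	typedef unsigned int acpi_status;

	#if defined(acpi_status)
	#error "never reached: a typedef is invisible to the preprocessor"
	#endif

	#define AE_ERROR 0x0001

	#if defined(AE_ERROR)
	/* reached: a macro can act as the guard */
	#endif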

+ 0 - 8
kernel/exit.c

@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
 
 
 	tsk->flags |= PF_EXITING;
 	tsk->flags |= PF_EXITING;
 
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
- 	tsk->it_virt_expires = cputime_zero;
- 	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
 				current->comm, current->pid,

+ 22 - 26
kernel/posix-cpu-timers.c

@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct cpu_timer_list *next;
 	struct cpu_timer_list *next;
 	unsigned long i;
 	unsigned long i;
 
 
-	if (CPUCLOCK_PERTHREAD(timer->it_clock)	&& (p->flags & PF_EXITING))
-		return;
-
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
 	head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
 		}
 		}
 		t = tsk;
 		t = tsk;
 		do {
 		do {
+			if (unlikely(t->flags & PF_EXITING))
+				continue;
+
 			ticks = cputime_add(cputime_add(t->utime, t->stime),
 			ticks = cputime_add(cputime_add(t->utime, t->stime),
 					    prof_left);
 					    prof_left);
 			if (!cputime_eq(prof_expires, cputime_zero) &&
 			if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
 					      t->it_sched_expires > sched)) {
 					      t->it_sched_expires > sched)) {
 				t->it_sched_expires = sched;
 				t->it_sched_expires = sched;
 			}
 			}
-
-			do {
-				t = next_thread(t);
-			} while (unlikely(t->flags & PF_EXITING));
-		} while (t != tsk);
+		} while ((t = next_thread(t)) != tsk);
 	}
 	}
 }
 }
 
 
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 
 #undef	UNEXPIRED
 #undef	UNEXPIRED
 
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	/*
 	 * Double-check with locks held.
 	 * Double-check with locks held.
 	 */
 	 */
 	read_lock(&tasklist_lock);
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
 
-	/*
-	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-	 * all the timers that are firing, and put them on the firing list.
-	 */
-	check_thread_timers(tsk, &firing);
-	check_process_timers(tsk, &firing);
+		/*
+		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+		 * all the timers that are firing, and put them on the firing list.
+		 */
+		check_thread_timers(tsk, &firing);
+		check_process_timers(tsk, &firing);
 
 
-	/*
-	 * We must release these locks before taking any timer's lock.
-	 * There is a potential race with timer deletion here, as the
-	 * siglock now protects our private firing list.  We have set
-	 * the firing flag in each timer, so that a deletion attempt
-	 * that gets the timer lock before we do will give it up and
-	 * spin until we've taken care of that timer below.
-	 */
-	spin_unlock(&tsk->sighand->siglock);
+		/*
+		 * We must release these locks before taking any timer's lock.
+		 * There is a potential race with timer deletion here, as the
+		 * siglock now protects our private firing list.  We have set
+		 * the firing flag in each timer, so that a deletion attempt
+		 * that gets the timer lock before we do will give it up and
+		 * spin until we've taken care of that timer below.
+		 */
+		spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 	read_unlock(&tasklist_lock);
 
 
 	/*
 	/*

+ 2 - 0
mm/shmem.c

@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 	if (!simple_empty(dentry))
 	if (!simple_empty(dentry))
 		return -ENOTEMPTY;
 		return -ENOTEMPTY;
 
 
+	dentry->d_inode->i_nlink--;
 	dir->i_nlink--;
 	dir->i_nlink--;
 	return shmem_unlink(dir, dentry);
 	return shmem_unlink(dir, dentry);
 }
 }
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 	sb->s_magic = TMPFS_MAGIC;
 	sb->s_magic = TMPFS_MAGIC;
 	sb->s_op = &shmem_ops;
 	sb->s_op = &shmem_ops;
+	sb->s_time_gran = 1;
 
 
 	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
 	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
 	if (!inode)
 	if (!inode)
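
The rmdir fix restores standard directory link accounting: a directory's "." entry
counts as a link to itself and its ".." entry as a link to the parent, so removing
an empty directory has to drop the victim's own count as well as the parent's
before the normal unlink drops the name.  A reduced sketch of that accounting for a
simple in-memory filesystem (demo_unlink stands in for the real unlink helper):

	int demo_unlink(struct inode *dir, struct dentry *dentry);

	static int demo_rmdir(struct inode *dir, struct dentry *dentry)
	{
		if (!simple_empty(dentry))
			return -ENOTEMPTY;

		dentry->d_inode->i_nlink--;	/* the victim's "." no longer counts */
		dir->i_nlink--;			/* the victim's ".." no longer pins the parent */
		return demo_unlink(dir, dentry);
	}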

+ 1 - 1
mm/vmscan.c

@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 loop_again:
 loop_again:
 	total_scanned = 0;
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	nr_reclaimed = 0;
-	sc.may_writepage = !laptop_mode,
+	sc.may_writepage = !laptop_mode;
 	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 
 	inc_page_state(pageoutrun);
 	inc_page_state(pageoutrun);

+ 1 - 0
net/dccp/ackvec.c

@@ -452,6 +452,7 @@ found:
 					      (unsigned long long)
 					      (unsigned long long)
 					      avr->dccpavr_ack_ackno);
 					      avr->dccpavr_ack_ackno);
 				dccp_ackvec_throw_record(av, avr);
 				dccp_ackvec_throw_record(av, avr);
+				break;
 			}
 			}
 			/*
 			/*
 			 * If it wasn't received, continue scanning... we might
 			 * If it wasn't received, continue scanning... we might

+ 1 - 0
net/ipv4/ip_forward.c

@@ -116,6 +116,7 @@ sr_failed:
 
 
 too_many_hops:
 too_many_hops:
         /* Tell the sender its packet died... */
         /* Tell the sender its packet died... */
+        IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
         icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
         icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
 drop:
 	kfree_skb(skb);
 	kfree_skb(skb);

+ 1 - 3
net/ipv4/tcp_input.c

@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 	 * Hence, we can detect timed out packets during fast
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 * retransmit without falling to slow start.
 	 */
 	 */
-	if (tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
 		struct sk_buff *skb;
 		struct sk_buff *skb;
 
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
 				tp->lost_out += tcp_skb_pcount(skb);
-				if (IsReno(tp))
-					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
 
 
 				/* clear xmit_retrans hint */
 				/* clear xmit_retrans hint */
 				if (tp->retransmit_skb_hint &&
 				if (tp->retransmit_skb_hint &&