
Merge branch 'upstream'

Jeff Garzik 19 years ago
parent
commit
6e07e16404
100 changed files with 3845 additions and 928 deletions
  1. 12 7
      Documentation/feature-removal-schedule.txt
  2. 71 0
      Documentation/leds-class.txt
  3. 1913 0
      Documentation/memory-barriers.txt
  4. 36 0
      Documentation/networking/bcm43xx.txt
  5. 0 2
      arch/alpha/kernel/alpha_ksyms.c
  6. 1 1
      arch/alpha/kernel/core_marvel.c
  7. 10 0
      arch/arm/Kconfig
  8. 44 0
      arch/arm/Kconfig-nommu
  9. 7 2
      arch/arm/Makefile
  10. 106 0
      arch/arm/boot/compressed/head.S
  11. 8 2
      arch/arm/common/sharpsl_pm.c
  12. 1 1
      arch/arm/kernel/entry-armv.S
  13. 217 0
      arch/arm/kernel/head-common.S
  14. 83 0
      arch/arm/kernel/head-nommu.S
  15. 1 206
      arch/arm/kernel/head.S
  16. 0 1
      arch/arm/kernel/process.c
  17. 1 1
      arch/arm/kernel/signal.h
  18. 5 4
      arch/arm/kernel/traps.c
  19. 11 0
      arch/arm/mach-pxa/corgi.c
  20. 11 0
      arch/arm/mach-pxa/spitz.c
  21. 9 0
      arch/arm/mach-pxa/tosa.c
  22. 1 0
      arch/arm/mm/proc-xsc3.S
  23. 0 2
      arch/arm26/kernel/armksyms.c
  24. 0 2
      arch/frv/kernel/frv_ksyms.c
  25. 0 2
      arch/h8300/kernel/h8300_ksyms.c
  26. 21 1
      arch/i386/kernel/apic.c
  27. 2 2
      arch/i386/kernel/cpu/mcheck/mce.c
  28. 1 1
      arch/i386/kernel/io_apic.c
  29. 0 1
      arch/i386/kernel/process.c
  30. 2 0
      arch/i386/kernel/syscall_table.S
  31. 1 1
      arch/i386/kernel/traps.c
  32. 1 1
      arch/i386/kernel/vsyscall-sigreturn.S
  33. 1 0
      arch/ia64/kernel/entry.S
  34. 1 0
      arch/ia64/kernel/gate.lds.S
  35. 162 103
      arch/ia64/kernel/iosapic.c
  36. 4 4
      arch/ia64/kernel/palinfo.c
  37. 1 1
      arch/ia64/kernel/time.c
  38. 367 0
      arch/ia64/kernel/topology.c
  39. 9 9
      arch/ia64/kernel/vmlinux.lds.S
  40. 7 1
      arch/ia64/mm/init.c
  41. 3 3
      arch/ia64/mm/ioremap.c
  42. 7 5
      arch/ia64/mm/tlb.c
  43. 6 2
      arch/ia64/sn/kernel/sn2/sn_hwperf.c
  44. 0 1
      arch/m68k/kernel/m68k_ksyms.c
  45. 0 2
      arch/m68knommu/kernel/m68k_ksyms.c
  46. 0 1
      arch/mips/kernel/process.c
  47. 1 5
      arch/parisc/Kconfig
  48. 74 86
      arch/parisc/configs/712_defconfig
  49. 2 2
      arch/parisc/configs/a500_defconfig
  50. 6 6
      arch/parisc/configs/b180_defconfig
  51. 108 144
      arch/parisc/configs/c3000_defconfig
  52. 110 55
      arch/parisc/defconfig
  53. 3 3
      arch/parisc/kernel/cache.c
  54. 39 6
      arch/parisc/kernel/entry.S
  55. 2 2
      arch/parisc/kernel/pacache.S
  56. 0 19
      arch/parisc/kernel/parisc_ksyms.c
  57. 2 3
      arch/parisc/kernel/pdc_chassis.c
  58. 1 1
      arch/parisc/kernel/perf.c
  59. 1 1
      arch/parisc/kernel/syscall_table.S
  60. 0 4
      arch/parisc/lib/iomap.c
  61. 3 4
      arch/parisc/mm/init.c
  62. 9 45
      arch/parisc/mm/ioremap.c
  63. 2 2
      arch/powerpc/kernel/crash_dump.c
  64. 0 1
      arch/powerpc/kernel/process.c
  65. 1 1
      arch/powerpc/kernel/vdso32/sigtramp.S
  66. 1 1
      arch/powerpc/kernel/vdso64/sigtramp.S
  67. 3 3
      arch/s390/kernel/smp.c
  68. 1 1
      arch/sh/kernel/cpu/init.c
  69. 1 1
      arch/sh/kernel/setup.c
  70. 3 0
      arch/um/Kconfig
  71. 3 4
      arch/um/Makefile
  72. 1 1
      arch/um/Makefile-x86_64
  73. 1 12
      arch/um/drivers/daemon_kern.c
  74. 4 4
      arch/um/drivers/harddog_kern.c
  75. 5 5
      arch/um/drivers/hostaudio_kern.c
  76. 1 12
      arch/um/drivers/mcast_kern.c
  77. 139 1
      arch/um/drivers/mconsole_kern.c
  78. 1 12
      arch/um/drivers/pcap_kern.c
  79. 1 12
      arch/um/drivers/slip_kern.c
  80. 2 13
      arch/um/drivers/slirp_kern.c
  81. 1 1
      arch/um/drivers/ubd_kern.c
  82. 5 1
      arch/um/include/kern_util.h
  83. 6 12
      arch/um/include/line.h
  84. 0 1
      arch/um/include/mem_user.h
  85. 9 1
      arch/um/include/os.h
  86. 3 2
      arch/um/include/sysdep-i386/checksum.h
  87. 5 0
      arch/um/include/sysdep-i386/ptrace.h
  88. 32 0
      arch/um/include/sysdep-i386/tls.h
  89. 29 0
      arch/um/include/sysdep-x86_64/tls.h
  90. 4 1
      arch/um/include/user_util.h
  91. 3 13
      arch/um/kernel/exec_kern.c
  92. 1 1
      arch/um/kernel/mem.c
  93. 19 7
      arch/um/kernel/process_kern.c
  94. 25 19
      arch/um/kernel/ptrace.c
  95. 11 0
      arch/um/kernel/skas/process_kern.c
  96. 2 2
      arch/um/kernel/syscall_kern.c
  97. 4 4
      arch/um/kernel/trap_kern.c
  98. 8 2
      arch/um/kernel/tt/process_kern.c
  99. 2 5
      arch/um/os-Linux/Makefile
  100. 1 12
      arch/um/os-Linux/drivers/ethertap_kern.c

+ 12 - 7
Documentation/feature-removal-schedule.txt

@@ -127,13 +127,6 @@ Who:	Christoph Hellwig <hch@lst.de>
 
 ---------------------------
 
-What:	EXPORT_SYMBOL(lookup_hash)
-When:	January 2006
-Why:	Too low-level interface.  Use lookup_one_len or lookup_create instead.
-Who:	Christoph Hellwig <hch@lst.de>
-
----------------------------
-
 What:	CONFIG_FORCED_INLINING
 When:	June 2006
 Why:	Config option is there to see if gcc is good enough. (in january
@@ -241,3 +234,15 @@ Why:	The USB subsystem has changed a lot over time, and it has been
 Who:	Greg Kroah-Hartman <gregkh@suse.de>
 
 ---------------------------
+
+What:	find_trylock_page
+When:	January 2007
+Why:	The interface no longer has any callers left in the kernel. It
+	is an odd interface (compared with other find_*_page functions), in
+	that it does not take a refcount to the page, only the page lock.
+	It should be replaced with find_get_page or find_lock_page if possible.
+	This feature removal can be reevaluated if users of the interface
+	cannot cleanly use something else.
+Who:	Nick Piggin <npiggin@suse.de>
+
+---------------------------

+ 71 - 0
Documentation/leds-class.txt

@@ -0,0 +1,71 @@
+LED handling under Linux
+========================
+
+If you're reading this and thinking about keyboard LEDs, note that these are
+handled by the input subsystem and the LED class is *not* needed.
+
+In its simplest form, the LED class just allows control of LEDs from
+userspace. LEDs appear in /sys/class/leds/. The brightness file will
+set the brightness of the LED (taking a value 0-255). Most LEDs don't
+have hardware brightness support so they will just be turned on for non-zero
+brightness settings.
+
+The class also introduces the optional concept of an LED trigger. A trigger
+is a kernel-based source of LED events. Triggers can either be simple or
+complex. A simple trigger isn't configurable and is designed to slot into
+existing subsystems with minimal additional code. Examples are the ide-disk,
+nand-disk and sharpsl-charge triggers. With LED triggers disabled, the code
+optimises away.
+
+Complex triggers, whilst available to all LEDs, have LED-specific
+parameters and work on a per-LED basis. The timer trigger is an example.
+
+You can change triggers in a similar manner to the way an IO scheduler
+is chosen (via /sys/class/leds/<device>/trigger). Trigger specific
+parameters can appear in /sys/class/leds/<device> once a given trigger is
+selected.
+
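+As a rough userspace sketch (the device name "myled:green" and the use of the
+timer trigger are illustrative assumptions, not part of the class interface):
+
+	/* needs <fcntl.h> and <unistd.h>; error handling omitted */
+	int fd;
+
+	/* turn the LED fully on */
+	fd = open("/sys/class/leds/myled:green/brightness", O_WRONLY);
+	write(fd, "255", 3);
+	close(fd);
+
+	/* hand the LED over to the timer trigger */
+	fd = open("/sys/class/leds/myled:green/trigger", O_WRONLY);
+	write(fd, "timer", 5);
+	close(fd);
+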
+
+Design Philosophy
+=================
+
+The underlying design philosophy is simplicity. LEDs are simple devices
+and the aim is to keep a small amount of code giving as much functionality
+as possible.  Please keep this in mind when suggesting enhancements.
+
+
+LED Device Naming
+=================
+
+LED device names are currently of the form:
+
+"devicename:colour"
+
+There have been calls for LED properties such as colour to be exported as
+individual led class attributes. As a solution which doesn't incur as much
+overhead, I suggest these become part of the device name. The naming scheme
+above leaves scope for further attributes should they be needed.
+
+
+Known Issues
+============
+
+The LED Trigger core cannot be a module as the simple trigger functions
+would cause nightmare dependency issues. I see this as a minor issue
+compared to the benefits the simple trigger functionality brings. The
+rest of the LED subsystem can be modular.
+
+Some LEDs can be programmed to flash in hardware. As this isn't a generic
+LED device property, this should be exported as a device specific sysfs
+attribute rather than part of the class if this functionality is required.
+
+
+Future Development
+==================
+
+At the moment, a trigger can't be created specifically for a single LED.
+There are a number of cases where a trigger might only be mappable to a
+particular LED (ACPI?). The addition of triggers provided by the LED driver
+should cover this option and be possible to add without breaking the
+current interface.
+

+ 1913 - 0
Documentation/memory-barriers.txt

@@ -0,0 +1,1913 @@
+			 ============================
+			 LINUX KERNEL MEMORY BARRIERS
+			 ============================
+
+By: David Howells <dhowells@redhat.com>
+
+Contents:
+
+ (*) Abstract memory access model.
+
+     - Device operations.
+     - Guarantees.
+
+ (*) What are memory barriers?
+
+     - Varieties of memory barrier.
+     - What may not be assumed about memory barriers?
+     - Data dependency barriers.
+     - Control dependencies.
+     - SMP barrier pairing.
+     - Examples of memory barrier sequences.
+
+ (*) Explicit kernel barriers.
+
+     - Compiler barrier.
+     - The CPU memory barriers.
+     - MMIO write barrier.
+
+ (*) Implicit kernel memory barriers.
+
+     - Locking functions.
+     - Interrupt disabling functions.
+     - Miscellaneous functions.
+
+ (*) Inter-CPU locking barrier effects.
+
+     - Locks vs memory accesses.
+     - Locks vs I/O accesses.
+
+ (*) Where are memory barriers needed?
+
+     - Interprocessor interaction.
+     - Atomic operations.
+     - Accessing devices.
+     - Interrupts.
+
+ (*) Kernel I/O barrier effects.
+
+ (*) Assumed minimum execution ordering model.
+
+ (*) The effects of the cpu cache.
+
+     - Cache coherency.
+     - Cache coherency vs DMA.
+     - Cache coherency vs MMIO.
+
+ (*) The things CPUs get up to.
+
+     - And then there's the Alpha.
+
+ (*) References.
+
+
+============================
+ABSTRACT MEMORY ACCESS MODEL
+============================
+
+Consider the following abstract model of the system:
+
+		            :                :
+		            :                :
+		            :                :
+		+-------+   :   +--------+   :   +-------+
+		|       |   :   |        |   :   |       |
+		|       |   :   |        |   :   |       |
+		| CPU 1 |<----->| Memory |<----->| CPU 2 |
+		|       |   :   |        |   :   |       |
+		|       |   :   |        |   :   |       |
+		+-------+   :   +--------+   :   +-------+
+		    ^       :       ^        :       ^
+		    |       :       |        :       |
+		    |       :       |        :       |
+		    |       :       v        :       |
+		    |       :   +--------+   :       |
+		    |       :   |        |   :       |
+		    |       :   |        |   :       |
+		    +---------->| Device |<----------+
+		            :   |        |   :
+		            :   |        |   :
+		            :   +--------+   :
+		            :                :
+
+Each CPU executes a program that generates memory access operations.  In the
+abstract CPU, memory operation ordering is very relaxed, and a CPU may actually
+perform the memory operations in any order it likes, provided program causality
+appears to be maintained.  Similarly, the compiler may also arrange the
+instructions it emits in any order it likes, provided it doesn't affect the
+apparent operation of the program.
+
+So in the above diagram, the effects of the memory operations performed by a
+CPU are perceived by the rest of the system as the operations cross the
+interface between the CPU and rest of the system (the dotted lines).
+
+
+For example, consider the following sequence of events:
+
+	CPU 1		CPU 2
+	===============	===============
+	{ A == 1; B == 2 }
+	A = 3;		x = A;
+	B = 4;		y = B;
+
+The set of accesses as seen by the memory system in the middle can be arranged
+in 24 different combinations:
+
+	STORE A=3,	STORE B=4,	x=LOAD A->3,	y=LOAD B->4
+	STORE A=3,	STORE B=4,	y=LOAD B->4,	x=LOAD A->3
+	STORE A=3,	x=LOAD A->3,	STORE B=4,	y=LOAD B->4
+	STORE A=3,	x=LOAD A->3,	y=LOAD B->2,	STORE B=4
+	STORE A=3,	y=LOAD B->2,	STORE B=4,	x=LOAD A->3
+	STORE A=3,	y=LOAD B->2,	x=LOAD A->3,	STORE B=4
+	STORE B=4,	STORE A=3,	x=LOAD A->3,	y=LOAD B->4
+	STORE B=4, ...
+	...
+
+and can thus result in four different combinations of values:
+
+	x == 1, y == 2
+	x == 1, y == 4
+	x == 3, y == 2
+	x == 3, y == 4
+
+
+Furthermore, the stores committed by a CPU to the memory system may not be
+perceived by the loads made by another CPU in the same order as the stores were
+committed.
+
+
+As a further example, consider this sequence of events:
+
+	CPU 1		CPU 2
+	===============	===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+	B = 4;		Q = P;
+	P = &B		D = *Q;
+
+There is an obvious data dependency here, as the value loaded into D depends on
+the address retrieved from P by CPU 2.  At the end of the sequence, any of the
+following results are possible:
+
+	(Q == &A) and (D == 1)
+	(Q == &B) and (D == 2)
+	(Q == &B) and (D == 4)
+
+Note that CPU 2 will never try and load C into D because the CPU will load P
+into Q before issuing the load of *Q.
+
+
+DEVICE OPERATIONS
+-----------------
+
+Some devices present their control interfaces as collections of memory
+locations, but the order in which the control registers are accessed is very
+important.  For instance, imagine an ethernet card with a set of internal
+registers that are accessed through an address port register (A) and a data
+port register (D).  To read internal register 5, the following code might then
+be used:
+
+	*A = 5;
+	x = *D;
+
+but this might show up as either of the following two sequences:
+
+	STORE *A = 5, x = LOAD *D
+	x = LOAD *D, STORE *A = 5
+
+the second of which will almost certainly result in a malfunction, since it
+sets the address _after_ attempting to read the register.
+
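+One way to preserve the intended order (a sketch only; the appropriate barrier
+or accessor depends on how the device memory is mapped) is to separate the two
+accesses with a mandatory barrier:
+
+	*A = 5;
+	mb();		/* make the address write reach the device before
+			   the data register is read */
+	x = *D;
+
+Within the kernel, the readl()/writel() family of accessors would normally be
+used for such registers; see the "Kernel I/O barrier effects" section below.
+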
+
+GUARANTEES
+----------
+
+There are some minimal guarantees that may be expected of a CPU:
+
+ (*) On any given CPU, dependent memory accesses will be issued in order, with
+     respect to itself.  This means that for:
+
+	Q = P; D = *Q;
+
+     the CPU will issue the following memory operations:
+
+	Q = LOAD P, D = LOAD *Q
+
+     and always in that order.
+
+ (*) Overlapping loads and stores within a particular CPU will appear to be
+     ordered within that CPU.  This means that for:
+
+	a = *X; *X = b;
+
+     the CPU will only issue the following sequence of memory operations:
+
+	a = LOAD *X, STORE *X = b
+
+     And for:
+
+	*X = c; d = *X;
+
+     the CPU will only issue:
+
+	STORE *X = c, d = LOAD *X
+
+     (Loads and stores overlap if they are targeted at overlapping pieces of
+     memory).
+
+And there are a number of things that _must_ or _must_not_ be assumed:
+
+ (*) It _must_not_ be assumed that independent loads and stores will be issued
+     in the order given.  This means that for:
+
+	X = *A; Y = *B; *D = Z;
+
+     we may get any of the following sequences:
+
+	X = LOAD *A,  Y = LOAD *B,  STORE *D = Z
+	X = LOAD *A,  STORE *D = Z, Y = LOAD *B
+	Y = LOAD *B,  X = LOAD *A,  STORE *D = Z
+	Y = LOAD *B,  STORE *D = Z, X = LOAD *A
+	STORE *D = Z, X = LOAD *A,  Y = LOAD *B
+	STORE *D = Z, Y = LOAD *B,  X = LOAD *A
+
+ (*) It _must_ be assumed that overlapping memory accesses may be merged or
+     discarded.  This means that for:
+
+	X = *A; Y = *(A + 4);
+
+     we may get any one of the following sequences:
+
+	X = LOAD *A; Y = LOAD *(A + 4);
+	Y = LOAD *(A + 4); X = LOAD *A;
+	{X, Y} = LOAD {*A, *(A + 4) };
+
+     And for:
+
+	*A = X; Y = *A;
+
+     we may get either of:
+
+	STORE *A = X; Y = LOAD *A;
+	STORE *A = Y = X;
+
+
+=========================
+WHAT ARE MEMORY BARRIERS?
+=========================
+
+As can be seen above, independent memory operations are effectively performed
+in random order, but this can be a problem for CPU-CPU interaction and for I/O.
+What is required is some way of intervening to instruct the compiler and the
+CPU to restrict the order.
+
+Memory barriers are such interventions.  They impose a perceived partial
+ordering between the memory operations specified on either side of the barrier.
+They request that the sequence of memory events generated appears to other
+parts of the system as if the barrier is effective on that CPU.
+
+
+VARIETIES OF MEMORY BARRIER
+---------------------------
+
+Memory barriers come in four basic varieties:
+
+ (1) Write (or store) memory barriers.
+
+     A write memory barrier gives a guarantee that all the STORE operations
+     specified before the barrier will appear to happen before all the STORE
+     operations specified after the barrier with respect to the other
+     components of the system.
+
+     A write barrier is a partial ordering on stores only; it is not required
+     to have any effect on loads.
+
+     A CPU can be viewed as committing a sequence of store operations to the
+     memory system as time progresses.  All stores before a write barrier will
+     occur in the sequence _before_ all the stores after the write barrier.
+
+     [!] Note that write barriers should normally be paired with read or data
+     dependency barriers; see the "SMP barrier pairing" subsection.
+
+
+ (2) Data dependency barriers.
+
+     A data dependency barrier is a weaker form of read barrier.  In the case
+     where two loads are performed such that the second depends on the result
+     of the first (eg: the first load retrieves the address to which the second
+     load will be directed), a data dependency barrier would be required to
+     make sure that the target of the second load is updated before the address
+     obtained by the first load is accessed.
+
+     A data dependency barrier is a partial ordering on interdependent loads
+     only; it is not required to have any effect on stores, independent loads
+     or overlapping loads.
+
+     As mentioned in (1), the other CPUs in the system can be viewed as
+     committing sequences of stores to the memory system that the CPU being
+     considered can then perceive.  A data dependency barrier issued by the CPU
+     under consideration guarantees that for any load preceding it, if that
+     load touches one of a sequence of stores from another CPU, then by the
+     time the barrier completes, the effects of all the stores prior to that
+     touched by the load will be perceptible to any loads issued after the data
+     dependency barrier.
+
+     See the "Examples of memory barrier sequences" subsection for diagrams
+     showing the ordering constraints.
+
+     [!] Note that the first load really has to have a _data_ dependency and
+     not a control dependency.  If the address for the second load is dependent
+     on the first load, but the dependency is through a conditional rather than
+     actually loading the address itself, then it's a _control_ dependency and
+     a full read barrier or better is required.  See the "Control dependencies"
+     subsection for more information.
+
+     [!] Note that data dependency barriers should normally be paired with
+     write barriers; see the "SMP barrier pairing" subsection.
+
+
+ (3) Read (or load) memory barriers.
+
+     A read barrier is a data dependency barrier plus a guarantee that all the
+     LOAD operations specified before the barrier will appear to happen before
+     all the LOAD operations specified after the barrier with respect to the
+     other components of the system.
+
+     A read barrier is a partial ordering on loads only; it is not required to
+     have any effect on stores.
+
+     Read memory barriers imply data dependency barriers, and so can substitute
+     for them.
+
+     [!] Note that read barriers should normally be paired with write barriers;
+     see the "SMP barrier pairing" subsection.
+
+
+ (4) General memory barriers.
+
+     A general memory barrier is a combination of both a read memory barrier
+     and a write memory barrier.  It is a partial ordering over both loads and
+     stores.
+
+     General memory barriers imply both read and write memory barriers, and so
+     can substitute for either.
+
+
+And a couple of implicit varieties:
+
+ (5) LOCK operations.
+
+     This acts as a one-way permeable barrier.  It guarantees that all memory
+     operations after the LOCK operation will appear to happen after the LOCK
+     operation with respect to the other components of the system.
+
+     Memory operations that occur before a LOCK operation may appear to happen
+     after it completes.
+
+     A LOCK operation should almost always be paired with an UNLOCK operation.
+
+
+ (6) UNLOCK operations.
+
+     This also acts as a one-way permeable barrier.  It guarantees that all
+     memory operations before the UNLOCK operation will appear to happen before
+     the UNLOCK operation with respect to the other components of the system.
+
+     Memory operations that occur after an UNLOCK operation may appear to
+     happen before it completes.
+
+     LOCK and UNLOCK operations are guaranteed to appear with respect to each
+     other strictly in the order specified.
+
+     The use of LOCK and UNLOCK operations generally precludes the need for
+     other sorts of memory barrier (but note the exceptions mentioned in the
+     subsection "MMIO write barrier").
+
+
+Memory barriers are only required where there's a possibility of interaction
+between two CPUs or between a CPU and a device.  If it can be guaranteed that
+there won't be any such interaction in any particular piece of code, then
+memory barriers are unnecessary in that piece of code.
+
+
+Note that these are the _minimum_ guarantees.  Different architectures may give
+more substantial guarantees, but they may _not_ be relied upon outside of arch
+specific code.
+
+
+WHAT MAY NOT BE ASSUMED ABOUT MEMORY BARRIERS?
+----------------------------------------------
+
+There are certain things that the Linux kernel memory barriers do not guarantee:
+
+ (*) There is no guarantee that any of the memory accesses specified before a
+     memory barrier will be _complete_ by the completion of a memory barrier
+     instruction; the barrier can be considered to draw a line in that CPU's
+     access queue that accesses of the appropriate type may not cross.
+
+ (*) There is no guarantee that issuing a memory barrier on one CPU will have
+     any direct effect on another CPU or any other hardware in the system.  The
+     indirect effect will be the order in which the second CPU sees the effects
+     of the first CPU's accesses occur, but see the next point:
+
+ (*) There is no guarantee that a CPU will see the correct order of effects
+     from a second CPU's accesses, even _if_ the second CPU uses a memory
+     barrier, unless the first CPU _also_ uses a matching memory barrier (see
+     the subsection on "SMP Barrier Pairing").
+
+ (*) There is no guarantee that some intervening piece of off-the-CPU
+     hardware[*] will not reorder the memory accesses.  CPU cache coherency
+     mechanisms should propagate the indirect effects of a memory barrier
+     between CPUs, but might not do so in order.
+
+	[*] For information on bus mastering DMA and coherency please read:
+
+	    Documentation/pci.txt
+	    Documentation/DMA-mapping.txt
+	    Documentation/DMA-API.txt
+
+
+DATA DEPENDENCY BARRIERS
+------------------------
+
+The usage requirements of data dependency barriers are a little subtle, and
+it's not always obvious that they're needed.  To illustrate, consider the
+following sequence of events:
+
+	CPU 1		CPU 2
+	===============	===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+	B = 4;
+	<write barrier>
+	P = &B
+			Q = P;
+			D = *Q;
+
+There's a clear data dependency here, and it would seem that by the end of the
+sequence, Q must be either &A or &B, and that:
+
+	(Q == &A) implies (D == 1)
+	(Q == &B) implies (D == 4)
+
+But! CPU 2's perception of P may be updated _before_ its perception of B, thus
+leading to the following situation:
+
+	(Q == &B) and (D == 2) ????
+
+Whilst this may seem like a failure of coherency or causality maintenance, it
+isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
+Alpha).
+
+To deal with this, a data dependency barrier must be inserted between the
+address load and the data load:
+
+	CPU 1		CPU 2
+	===============	===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+	B = 4;
+	<write barrier>
+	P = &B
+			Q = P;
+			<data dependency barrier>
+			D = *Q;
+
+This enforces the occurrence of one of the two implications, and prevents the
+third possibility from arising.
+
+[!] Note that this extremely counterintuitive situation arises most easily on
+machines with split caches, so that, for example, one cache bank processes
+even-numbered cache lines and the other bank processes odd-numbered cache
+lines.  The pointer P might be stored in an odd-numbered cache line, and the
+variable B might be stored in an even-numbered cache line.  Then, if the
+even-numbered bank of the reading CPU's cache is extremely busy while the
+odd-numbered bank is idle, one can see the new value of the pointer P (&B),
+but the old value of the variable B (2).
+
+
+Another example of where data dependency barriers might be required is where a
+number is read from memory and then used to calculate the index for an array
+access:
+
+	CPU 1		CPU 2
+	===============	===============
+	{ M[0] == 1, M[1] == 2, M[3] == 3, P == 0, Q == 3 }
+	M[1] = 4;
+	<write barrier>
+	P = 1
+			Q = P;
+			<data dependency barrier>
+			D = M[Q];
+
+
+The data dependency barrier is very important to the RCU system, for example.
+See rcu_dereference() in include/linux/rcupdate.h.  This permits the current
+target of an RCU'd pointer to be replaced with a new modified target, without
+the replacement target appearing to be incompletely initialised.
+
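+As a brief illustrative sketch (gp is a hypothetical RCU-protected pointer,
+and struct foo and do_something_with() are equally hypothetical):
+
+	struct foo *p;
+
+	rcu_read_lock();
+	p = rcu_dereference(gp);	/* includes a data dependency barrier */
+	if (p)
+		do_something_with(p->data);
+	rcu_read_unlock();
+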
+See also the subsection on "Cache Coherency" for a more thorough example.
+
+
+CONTROL DEPENDENCIES
+--------------------
+
+A control dependency requires a full read memory barrier, not simply a data
+dependency barrier to make it work correctly.  Consider the following bit of
+code:
+
+	q = &a;
+	if (p)
+		q = &b;
+	<data dependency barrier>
+	x = *q;
+
+This will not have the desired effect because there is no actual data
+dependency, but rather a control dependency that the CPU may short-circuit by
+attempting to predict the outcome in advance.  In such a case what's actually
+required is:
+
+	q = &a;
+	if (p)
+		q = &b;
+	<read barrier>
+	x = *q;
+
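+In kernel terms the read barrier in the corrected sequence would typically be
+smp_rmb() (a sketch, assuming p, a and b are shared with another CPU):
+
+	q = &a;
+	if (p)
+		q = &b;
+	smp_rmb();		/* a full read barrier, not just a data
+				   dependency barrier */
+	x = *q;
+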
+
+SMP BARRIER PAIRING
+-------------------
+
+When dealing with CPU-CPU interactions, certain types of memory barrier should
+always be paired.  A lack of appropriate pairing is almost certainly an error.
+
+A write barrier should always be paired with a data dependency barrier or read
+barrier, though a general barrier would also be viable.  Similarly a read
+barrier or a data dependency barrier should always be paired with at least a
+write barrier, though, again, a general barrier is viable:
+
+	CPU 1		CPU 2
+	===============	===============
+	a = 1;
+	<write barrier>
+	b = 2;		x = a;
+			<read barrier>
+			y = b;
+
+Or:
+
+	CPU 1		CPU 2
+	===============	===============================
+	a = 1;
+	<write barrier>
+	b = &a;		x = b;
+			<data dependency barrier>
+			y = *x;
+
+Basically, the read barrier always has to be there, even though it can be of
+the "weaker" type.
+
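+Expressed with the kernel's barrier primitives, the first pairing above might
+be sketched as follows (data and ready are hypothetical shared variables, both
+initially zero):
+
+	CPU 1				CPU 2
+	===============================	===============================
+	data = 42;
+	smp_wmb();
+	ready = 1;
+					while (!ready)
+						barrier();
+					smp_rmb();
+					x = data;	/* x will be 42 */
+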
+
+EXAMPLES OF MEMORY BARRIER SEQUENCES
+------------------------------------
+
+Firstly, write barriers act as partial orderings on store operations.
+Consider the following sequence of events:
+
+	CPU 1
+	=======================
+	STORE A = 1
+	STORE B = 2
+	STORE C = 3
+	<write barrier>
+	STORE D = 4
+	STORE E = 5
+
+This sequence of events is committed to the memory coherence system in an order
+that the rest of the system might perceive as the unordered set of { STORE A,
+STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
+}:
+
+	+-------+       :      :
+	|       |       +------+
+	|       |------>| C=3  |     }     /\
+	|       |  :    +------+     }-----  \  -----> Events perceptible
+	|       |  :    | A=1  |     }        \/       to rest of system
+	|       |  :    +------+     }
+	| CPU 1 |  :    | B=2  |     }
+	|       |       +------+     }
+	|       |   wwwwwwwwwwwwwwww }   <--- At this point the write barrier
+	|       |       +------+     }        requires all stores prior to the
+	|       |  :    | E=5  |     }        barrier to be committed before
+	|       |  :    +------+     }        further stores may take place.
+	|       |------>| D=4  |     }
+	|       |       +------+
+	+-------+       :      :
+	                   |
+	                   | Sequence in which stores committed to memory system
+	                   | by CPU 1
+	                   V
+
+
+Secondly, data dependency barriers act as partial orderings on data-dependent
+loads.  Consider the following sequence of events:
+
+	CPU 1			CPU 2
+	=======================	=======================
+	STORE A = 1
+	STORE B = 2
+	<write barrier>
+	STORE C = &B		LOAD X
+	STORE D = 4		LOAD C (gets &B)
+				LOAD *C (reads B)
+
+Without intervention, CPU 2 may perceive the events on CPU 1 in some
+effectively random order, despite the write barrier issued by CPU 1:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+  | Sequence of update
+	|       |------>| B=2  |-----       --->| Y->8  |  | of perception on
+	|       |  :    +------+     \          +-------+  | CPU 2
+	| CPU 1 |  :    | A=1  |      \     --->| C->&Y |  V
+	|       |       +------+       |        +-------+
+	|       |   wwwwwwwwwwwwwwww   |        :       :
+	|       |       +------+       |        :       :
+	|       |  :    | C=&B |---    |        :       :       +-------+
+	|       |  :    +------+   \   |        +-------+       |       |
+	|       |------>| D=4  |    ----------->| C->&B |------>|       |
+	|       |       +------+       |        +-------+       |       |
+	+-------+       :      :       |        :       :       |       |
+	                               |        :       :       |       |
+	                               |        :       :       | CPU 2 |
+	                               |        +-------+       |       |
+	    Apparently incorrect --->  |        | B->7  |------>|       |
+	    perception of B (!)        |        +-------+       |       |
+	                               |        :       :       |       |
+	                               |        +-------+       |       |
+	    The load of X holds --->    \       | X->9  |------>|       |
+	    up the maintenance           \      +-------+       |       |
+	    of coherence of B             ----->| B->2  |       +-------+
+	                                        +-------+
+	                                        :       :
+
+
+In the above example, CPU 2 perceives that B is 7, despite the load of *C
+(which would be B) coming after the LOAD of C.
+
+If, however, a data dependency barrier were to be placed between the load of C
+and the load of *C (ie: B) on CPU 2, then the following will occur:
+
+	+-------+       :      :                :       :
+	|       |       +------+                +-------+
+	|       |------>| B=2  |-----       --->| Y->8  |
+	|       |  :    +------+     \          +-------+
+	| CPU 1 |  :    | A=1  |      \     --->| C->&Y |
+	|       |       +------+       |        +-------+
+	|       |   wwwwwwwwwwwwwwww   |        :       :
+	|       |       +------+       |        :       :
+	|       |  :    | C=&B |---    |        :       :       +-------+
+	|       |  :    +------+   \   |        +-------+       |       |
+	|       |------>| D=4  |    ----------->| C->&B |------>|       |
+	|       |       +------+       |        +-------+       |       |
+	+-------+       :      :       |        :       :       |       |
+	                               |        :       :       |       |
+	                               |        :       :       | CPU 2 |
+	                               |        +-------+       |       |
+	                                \       | X->9  |------>|       |
+	                                 \      +-------+       |       |
+	                                  ----->| B->2  |       |       |
+	                                        +-------+       |       |
+	     Makes sure all effects --->    ddddddddddddddddd   |       |
+	     prior to the store of C            +-------+       |       |
+	     are perceptible to                 | B->2  |------>|       |
+	     successive loads                   +-------+       |       |
+	                                        :       :       +-------+
+
+
+And thirdly, a read barrier acts as a partial order on loads.  Consider the
+following sequence of events:
+
+	CPU 1			CPU 2
+	=======================	=======================
+	STORE A=1
+	STORE B=2
+	STORE C=3
+	<write barrier>
+	STORE D=4
+	STORE E=5
+				LOAD A
+				LOAD B
+				LOAD C
+				LOAD D
+				LOAD E
+
+Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
+some effectively random order, despite the write barrier issued by CPU 1:
+
+	+-------+       :      :
+	|       |       +------+
+	|       |------>| C=3  | }
+	|       |  :    +------+ }
+	|       |  :    | A=1  | }
+	|       |  :    +------+ }
+	| CPU 1 |  :    | B=2  | }---
+	|       |       +------+ }   \
+	|       |   wwwwwwwwwwwww}    \
+	|       |       +------+ }     \          :       :       +-------+
+	|       |  :    | E=5  | }      \         +-------+       |       |
+	|       |  :    +------+ }       \      { | C->3  |------>|       |
+	|       |------>| D=4  | }        \     { +-------+    :  |       |
+	|       |       +------+           \    { | E->5  |    :  |       |
+	+-------+       :      :            \   { +-------+    :  |       |
+	                           Transfer  -->{ | A->1  |    :  | CPU 2 |
+	                          from CPU 1    { +-------+    :  |       |
+	                           to CPU 2     { | D->4  |    :  |       |
+	                                        { +-------+    :  |       |
+	                                        { | B->2  |------>|       |
+	                                          +-------+       |       |
+	                                          :       :       +-------+
+
+
+If, however, a read barrier were to be placed between the load of C and the
+load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
+perceived correctly by CPU 2.
+
+	+-------+       :      :
+	|       |       +------+
+	|       |------>| C=3  | }
+	|       |  :    +------+ }
+	|       |  :    | A=1  | }---
+	|       |  :    +------+ }   \
+	| CPU 1 |  :    | B=2  | }    \
+	|       |       +------+       \
+	|       |   wwwwwwwwwwwwwwww    \
+	|       |       +------+         \        :       :       +-------+
+	|       |  :    | E=5  | }        \       +-------+       |       |
+	|       |  :    +------+ }---      \    { | C->3  |------>|       |
+	|       |------>| D=4  | }   \      \   { +-------+    :  |       |
+	|       |       +------+      \      -->{ | B->2  |    :  |       |
+	+-------+       :      :       \        { +-------+    :  |       |
+	                                \       { | A->1  |    :  | CPU 2 |
+	                                 \        +-------+       |       |
+	   At this point the read ---->   \   rrrrrrrrrrrrrrrrr   |       |
+	   barrier causes all effects      \      +-------+       |       |
+	   prior to the storage of C        \   { | E->5  |    :  |       |
+	   to be perceptible to CPU 2        -->{ +-------+    :  |       |
+	                                        { | D->4  |------>|       |
+	                                          +-------+       |       |
+	                                          :       :       +-------+
+
+
+========================
+EXPLICIT KERNEL BARRIERS
+========================
+
+The Linux kernel has a variety of different barriers that act at different
+levels:
+
+  (*) Compiler barrier.
+
+  (*) CPU memory barriers.
+
+  (*) MMIO write barrier.
+
+
+COMPILER BARRIER
+----------------
+
+The Linux kernel has an explicit compiler barrier function that prevents the
+compiler from moving the memory accesses either side of it to the other side:
+
+	barrier();
+
+This is a general barrier - lesser varieties of compiler barrier do not exist.
+
+The compiler barrier has no direct effect on the CPU, which may then reorder
+things however it wishes.
+
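+As a sketch of where this matters, consider busy-waiting on a flag that is set
+elsewhere, for instance by an interrupt handler (flag being a hypothetical
+shared variable):
+
+	while (!flag)
+		barrier();	/* force the compiler to reload flag from
+				   memory on each iteration rather than cache
+				   it in a register */
+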
+
+CPU MEMORY BARRIERS
+-------------------
+
+The Linux kernel has eight basic CPU memory barriers:
+
+	TYPE		MANDATORY		SMP CONDITIONAL
+	===============	=======================	===========================
+	GENERAL		mb()			smp_mb()
+	WRITE		wmb()			smp_wmb()
+	READ		rmb()			smp_rmb()
+	DATA DEPENDENCY	read_barrier_depends()	smp_read_barrier_depends()
+
+
+All CPU memory barriers unconditionally imply compiler barriers.
+
+SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
+systems because it is assumed that a CPU will appear to be self-consistent,
+and will order overlapping accesses correctly with respect to itself.
+
+[!] Note that SMP memory barriers _must_ be used to control the ordering of
+references to shared memory on SMP systems, though the use of locking instead
+is sufficient.
+
+Mandatory barriers should not be used to control SMP effects, since mandatory
+barriers unnecessarily impose overhead on UP systems. They may, however, be
+used to control MMIO effects on accesses through relaxed memory I/O windows.
+These are required even on non-SMP systems as they affect the order in which
+memory operations appear to a device by prohibiting both the compiler and the
+CPU from reordering them.
+
+
+There are some more advanced barrier functions:
+
+ (*) set_mb(var, value)
+ (*) set_wmb(var, value)
+
+     These assign the value to the variable and then insert at least a write
+     barrier after it, depending on the function.  They aren't guaranteed to
+     insert anything more than a compiler barrier in a UP compilation.
+
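+     Conceptually this amounts to the following (a sketch, not the exact
+     implementation on any particular architecture):
+
+	set_mb(var, value);	/* roughly: var = value; <general barrier> */
+	set_wmb(var, value);	/* roughly: var = value; <write barrier> */
+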
+
+ (*) smp_mb__before_atomic_dec();
+ (*) smp_mb__after_atomic_dec();
+ (*) smp_mb__before_atomic_inc();
+ (*) smp_mb__after_atomic_inc();
+
+     These are for use with atomic add, subtract, increment and decrement
+     functions, especially when used for reference counting.  These functions
+     do not imply memory barriers.
+
+     As an example, consider a piece of code that marks an object as being dead
+     and then decrements the object's reference count:
+
+	obj->dead = 1;
+	smp_mb__before_atomic_dec();
+	atomic_dec(&obj->ref_count);
+
+     This makes sure that the death mark on the object is perceived to be set
+     *before* the reference counter is decremented.
+
+     See Documentation/atomic_ops.txt for more information.  See the "Atomic
+     operations" subsection for information on where to use these.
+
+
+ (*) smp_mb__before_clear_bit(void);
+ (*) smp_mb__after_clear_bit(void);
+
+     These are for use similar to the atomic inc/dec barriers.  These are
+     typically used for bitwise unlocking operations, so care must be taken as
+     there are no implicit memory barriers here either.
+
+     Consider implementing an unlock operation of some nature by clearing a
+     locking bit.  The clear_bit() would then need to be barriered like this:
+
+	smp_mb__before_clear_bit();
+	clear_bit( ... );
+
+     This prevents memory operations before the clear leaking to after it.  See
+     the subsection on "Locking Functions" with reference to UNLOCK operation
+     implications.
+
+     See Documentation/atomic_ops.txt for more information.  See the "Atomic
+     operations" subsection for information on where to use these.
+
+
+MMIO WRITE BARRIER
+------------------
+
+The Linux kernel also has a special barrier for use with memory-mapped I/O
+writes:
+
+	mmiowb();
+
+This is a variation on the mandatory write barrier that causes writes to weakly
+ordered I/O regions to be partially ordered.  Its effects may go beyond the
+CPU->Hardware interface and actually affect the hardware at some level.
+
+See the subsection "Locks vs I/O accesses" for more information.
+
+
+===============================
+IMPLICIT KERNEL MEMORY BARRIERS
+===============================
+
+Some of the other functions in the linux kernel imply memory barriers, amongst
+which are locking, scheduling and memory allocation functions.
+
+This specification is a _minimum_ guarantee; any particular architecture may
+provide more substantial guarantees, but these may not be relied upon outside
+of arch specific code.
+
+
+LOCKING FUNCTIONS
+-----------------
+
+The Linux kernel has a number of locking constructs:
+
+ (*) spin locks
+ (*) R/W spin locks
+ (*) mutexes
+ (*) semaphores
+ (*) R/W semaphores
+ (*) RCU
+
+In all cases there are variants on "LOCK" operations and "UNLOCK" operations
+for each construct.  These operations all imply certain barriers:
+
+ (1) LOCK operation implication:
+
+     Memory operations issued after the LOCK will be completed after the LOCK
+     operation has completed.
+
+     Memory operations issued before the LOCK may be completed after the LOCK
+     operation has completed.
+
+ (2) UNLOCK operation implication:
+
+     Memory operations issued before the UNLOCK will be completed before the
+     UNLOCK operation has completed.
+
+     Memory operations issued after the UNLOCK may be completed before the
+     UNLOCK operation has completed.
+
+ (3) LOCK vs LOCK implication:
+
+     All LOCK operations issued before another LOCK operation will be completed
+     before that LOCK operation.
+
+ (4) LOCK vs UNLOCK implication:
+
+     All LOCK operations issued before an UNLOCK operation will be completed
+     before the UNLOCK operation.
+
+     All UNLOCK operations issued before a LOCK operation will be completed
+     before the LOCK operation.
+
+ (5) Failed conditional LOCK implication:
+
+     Certain variants of the LOCK operation may fail, either due to being
+     unable to get the lock immediately, or due to receiving an unblocked
+     signal whilst asleep waiting for the lock to become available.  Failed
+     locks do not imply any sort of barrier.
+
+Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
+equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
+
+[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
+    barriers is that the effects of instructions outside of a critical section
+    may seep into the inside of the critical section.
+
+Locks and semaphores may not provide any guarantee of ordering on UP compiled
+systems, and so cannot be counted on in such a situation to actually achieve
+anything at all - especially with respect to I/O accesses - unless combined
+with interrupt disabling operations.
+
+See also the section on "Inter-CPU locking barrier effects".
+
+
+As an example, consider the following:
+
+	*A = a;
+	*B = b;
+	LOCK
+	*C = c;
+	*D = d;
+	UNLOCK
+	*E = e;
+	*F = f;
+
+The following sequence of events is acceptable:
+
+	LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
+
+	[+] Note that {*F,*A} indicates a combined access.
+
+But none of the following are:
+
+	{*F,*A}, *B,	LOCK, *C, *D,	UNLOCK, *E
+	*A, *B, *C,	LOCK, *D,	UNLOCK, *E, *F
+	*A, *B,		LOCK, *C,	UNLOCK, *D, *E, *F
+	*B,		LOCK, *C, *D,	UNLOCK, {*F,*A}, *E
+
+
+
+INTERRUPT DISABLING FUNCTIONS
+-----------------------------
+
+Functions that disable interrupts (LOCK equivalent) and enable interrupts
+(UNLOCK equivalent) will act as compiler barriers only.  So if memory or I/O
+barriers are required in such a situation, they must be provided from some
+other means.
+
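+For example, if MMIO writes inside an interrupt-disabled region must reach the
+device in order through a relaxed memory window, an explicit barrier still has
+to be supplied (a sketch; ADDR and DATA are the hypothetical device registers
+used elsewhere in this document):
+
+	unsigned long flags;
+
+	local_irq_save(flags);
+	writel(3, ADDR);
+	wmb();		/* interrupt disabling is only a compiler barrier */
+	writel(y, DATA);
+	local_irq_restore(flags);
+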
+
+MISCELLANEOUS FUNCTIONS
+-----------------------
+
+Other functions that imply barriers:
+
+ (*) schedule() and similar imply full memory barriers.
+
+ (*) Memory allocation and release functions imply full memory barriers.
+
+
+=================================
+INTER-CPU LOCKING BARRIER EFFECTS
+=================================
+
+On SMP systems locking primitives give a more substantial form of barrier: one
+that does affect memory access ordering on other CPUs, within the context of
+conflict on any particular lock.
+
+
+LOCKS VS MEMORY ACCESSES
+------------------------
+
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
+three CPUs; then should the following sequence of events occur:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	*A = a;				*E = e;
+	LOCK M				LOCK Q
+	*B = b;				*F = f;
+	*C = c;				*G = g;
+	UNLOCK M			UNLOCK Q
+	*D = d;				*H = h;
+
+Then there is no guarantee as to what order CPU #3 will see the accesses to *A
+through *H occur in, other than the constraints imposed by the separate locks
+on the separate CPUs. It might, for example, see:
+
+	*E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
+
+But it won't see any of:
+
+	*B, *C or *D preceding LOCK M
+	*A, *B or *C following UNLOCK M
+	*F, *G or *H preceding LOCK Q
+	*E, *F or *G following UNLOCK Q
+
+
+However, if the following occurs:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	*A = a;
+	LOCK M		[1]
+	*B = b;
+	*C = c;
+	UNLOCK M	[1]
+	*D = d;				*E = e;
+					LOCK M		[2]
+					*F = f;
+					*G = g;
+					UNLOCK M	[2]
+					*H = h;
+
+CPU #3 might see:
+
+	*E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
+		LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
+
+But assuming CPU #1 gets the lock first, it won't see any of:
+
+	*B, *C, *D, *F, *G or *H preceding LOCK M [1]
+	*A, *B or *C following UNLOCK M [1]
+	*F, *G or *H preceding LOCK M [2]
+	*A, *B, *C, *E, *F or *G following UNLOCK M [2]
+
+
+LOCKS VS I/O ACCESSES
+---------------------
+
+Under certain circumstances (especially involving NUMA), I/O accesses within
+two spinlocked sections on two different CPUs may be seen as interleaved by the
+PCI bridge, because the PCI bridge does not necessarily participate in the
+cache-coherence protocol, and is therefore incapable of issuing the required
+read memory barriers.
+
+For example:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+	writel(1, DATA);
+	spin_unlock(Q);
+					spin_lock(Q);
+					writel(4, ADDR);
+					writel(5, DATA);
+					spin_unlock(Q);
+
+may be seen by the PCI bridge as follows:
+
+	STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
+
+which would probably cause the hardware to malfunction.
+
+
+What is necessary here is to intervene with an mmiowb() before dropping the
+spinlock, for example:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+	writel(1, DATA);
+	mmiowb();
+	spin_unlock(Q);
+					spin_lock(Q);
+					writel(4, ADDR);
+					writel(5, DATA);
+					mmiowb();
+					spin_unlock(Q);
+
+this will ensure that the two stores issued on CPU #1 appear at the PCI bridge
+before either of the stores issued on CPU #2.
+
+
+Furthermore, following a store by a load to the same device obviates the need
+for an mmiowb(), because the load forces the store to complete before the load
+is performed:
+
+	CPU 1				CPU 2
+	===============================	===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+	a = readl(DATA);
+	spin_unlock(Q);
+					spin_lock(Q);
+					writel(4, ADDR);
+					b = readl(DATA);
+					spin_unlock(Q);
+
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+=================================
+WHERE ARE MEMORY BARRIERS NEEDED?
+=================================
+
+Under normal operation, memory operation reordering is generally not going to
+be a problem as a single-threaded linear piece of code will still appear to
+work correctly, even if it's in an SMP kernel.  There are, however, four
+circumstances in which reordering definitely _could_ be a problem:
+
+ (*) Interprocessor interaction.
+
+ (*) Atomic operations.
+
+ (*) Accessing devices (I/O).
+
+ (*) Interrupts.
+
+
+INTERPROCESSOR INTERACTION
+--------------------------
+
+When there's a system with more than one processor, more than one CPU in the
+system may be working on the same data set at the same time.  This can cause
+synchronisation problems, and the usual way of dealing with them is to use
+locks.  Locks, however, are quite expensive, and so it may be preferable to
+operate without the use of a lock if at all possible.  In such a case
+operations that affect both CPUs may have to be carefully ordered to prevent
+a malfunction.
+
+Consider, for example, the R/W semaphore slow path.  Here a waiting process is
+queued on the semaphore, by virtue of it having a piece of its stack linked to
+the semaphore's list of waiting processes:
+
+	struct rw_semaphore {
+		...
+		spinlock_t lock;
+		struct list_head waiters;
+	};
+
+	struct rwsem_waiter {
+		struct list_head list;
+		struct task_struct *task;
+	};
+
+To wake up a particular waiter, the up_read() or up_write() functions have to:
+
+ (1) read the next pointer from this waiter's record to know where the
+     next waiter record is;
+
+ (2) read the pointer to the waiter's task structure;
+
+ (3) clear the task pointer to tell the waiter it has been given the semaphore;
+
+ (4) call wake_up_process() on the task; and
+
+ (5) release the reference held on the waiter's task struct.
+
+In other words, it has to perform this sequence of events:
+
+	LOAD waiter->list.next;
+	LOAD waiter->task;
+	STORE waiter->task;
+	CALL wakeup
+	RELEASE task
+
+and if any of these steps occur out of order, then the whole thing may
+malfunction.
+
+Once it has queued itself and dropped the semaphore lock, the waiter does not
+get the lock again; it instead just waits for its task pointer to be cleared
+before proceeding.  Since the record is on the waiter's stack, this means that
+if the task pointer is cleared _before_ the next pointer in the list is read,
+another CPU might start processing the waiter and might clobber the waiter's
+stack before the up*() function has a chance to read the next pointer.
+
+Consider then what might happen to the above sequence of events:
+
+	CPU 1				CPU 2
+	===============================	===============================
+					down_xxx()
+					Queue waiter
+					Sleep
+	up_yyy()
+	LOAD waiter->task;
+	STORE waiter->task;
+					Woken up by other event
+	<preempt>
+					Resume processing
+					down_xxx() returns
+					call foo()
+					foo() clobbers *waiter
+	</preempt>
+	LOAD waiter->list.next;
+	--- OOPS ---
+
+This could be dealt with using the semaphore lock, but then the down_xxx()
+function has to needlessly get the spinlock again after being woken up.
+
+The way to deal with this is to insert a general SMP memory barrier:
+
+	LOAD waiter->list.next;
+	LOAD waiter->task;
+	smp_mb();
+	STORE waiter->task;
+	CALL wakeup
+	RELEASE task
+
+In this case, the barrier makes a guarantee that all memory accesses before the
+barrier will appear to happen before all the memory accesses after the barrier
+with respect to the other CPUs on the system.  It does _not_ guarantee that all
+the memory accesses before the barrier will be complete by the time the barrier
+instruction itself is complete.
+
+On a UP system - where this wouldn't be a problem - the smp_mb() is just a
+compiler barrier, thus making sure the compiler emits the instructions in the
+right order without actually intervening in the CPU.  Since there's only
+one CPU, that CPU's dependency ordering logic will take care of everything
+else.
+
+
+ATOMIC OPERATIONS
+-----------------
+
+Though they are technically interprocessor interaction considerations, atomic
+operations are noted specially as they do _not_ generally imply memory
+barriers.  The possible offenders include:
+
+	xchg();
+	cmpxchg();
+	test_and_set_bit();
+	test_and_clear_bit();
+	test_and_change_bit();
+	atomic_cmpxchg();
+	atomic_inc_return();
+	atomic_dec_return();
+	atomic_add_return();
+	atomic_sub_return();
+	atomic_inc_and_test();
+	atomic_dec_and_test();
+	atomic_sub_and_test();
+	atomic_add_negative();
+	atomic_add_unless();
+
+These may be used for such things as implementing LOCK operations or controlling
+the lifetime of objects by decreasing their reference counts.  In such cases
+they need preceding memory barriers.
+
+The following may also be possible offenders as they may be used as UNLOCK
+operations:
+
+	set_bit();
+	clear_bit();
+	change_bit();
+	atomic_set();
+
+
+The following are a little tricky:
+
+	atomic_add();
+	atomic_sub();
+	atomic_inc();
+	atomic_dec();
+
+If they're used for statistics generation, then they probably don't need memory
+barriers, unless there's a coupling between statistical data.
+
+If they're used for reference counting on an object to control its lifetime,
+they probably don't need memory barriers because either the reference count
+will be adjusted inside a locked section, or the caller will already hold
+sufficient references to make the lock, and thus the memory barrier, unnecessary.
+
+If they're used for constructing a lock of some description, then they probably
+do need memory barriers as a lock primitive generally has to do things in a
+specific order.
+
+
+Basically, each usage case has to be carefully considered as to whether memory
+barriers are needed or not.  The simplest rule is probably: if the atomic
+operation is protected by a lock, then it does not require a barrier unless
+there's another operation within the critical section with respect to which an
+ordering must be maintained.
+
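+For instance, following the note above that lifetime-controlling decrements
+need preceding memory barriers, an unlocked object release might be sketched
+as follows (obj and its ref_count field follow the earlier example in this
+document; kfree() stands in for whatever actually destroys the object):
+
+	smp_mb__before_atomic_dec();
+	if (atomic_dec_and_test(&obj->ref_count))
+		kfree(obj);
+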
+See Documentation/atomic_ops.txt for more information.
+
+
+ACCESSING DEVICES
+-----------------
+
+Many devices can be memory mapped, and so appear to the CPU as if they're just
+a set of memory locations.  To control such a device, the driver usually has to
+make the right memory accesses in exactly the right order.
+
+However, having a clever CPU or a clever compiler creates a potential problem
+in that the carefully sequenced accesses in the driver code won't reach the
+device in the requisite order if the CPU or the compiler thinks it is more
+efficient to reorder, combine or merge accesses - something that would cause
+the device to malfunction.
+
+Inside of the Linux kernel, I/O should be done through the appropriate accessor
+routines - such as inb() or writel() - which know how to make such accesses
+appropriately sequential.  Whilst this, for the most part, renders the explicit
+use of memory barriers unnecessary, there are a couple of situations where they
+might be needed:
+
+ (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
+     so for _all_ general drivers locks should be used and mmiowb() must be
+     issued prior to unlocking the critical section.
+
+ (2) If the accessor functions are used to refer to an I/O memory window with
+     relaxed memory access properties, then _mandatory_ memory barriers are
+     required to enforce ordering.
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+INTERRUPTS
+----------
+
+A driver may be interrupted by its own interrupt service routine, and thus the
+two parts of the driver may interfere with each other's attempts to control or
+access the device.
+
+This may be alleviated - at least in part - by disabling local interrupts (a
+form of locking), such that the critical operations are all contained within
+the interrupt-disabled section in the driver.  Whilst the driver's interrupt
+routine is executing, the driver's core may not run on the same CPU, and its
+interrupt is not permitted to happen again until the current interrupt has been
+handled, thus the interrupt handler does not need to lock against that.
+
+However, consider a driver that was talking to an ethernet card that sports an
+address register and a data register.  If that driver's core talks to the card
+under interrupt-disablement and then the driver's interrupt handler is invoked:
+
+	LOCAL IRQ DISABLE
+	writew(3, ADDR);
+	writew(y, DATA);
+	LOCAL IRQ ENABLE
+	<interrupt>
+	writew(4, ADDR);
+	q = readw(DATA);
+	</interrupt>
+
+The store to the data register might happen after the second store to the
+address register if ordering rules are sufficiently relaxed:
+
+	STORE *ADDR = 3, STORE *ADDR = 4, STORE *DATA = y, q = LOAD *DATA
+
+
+If ordering rules are relaxed, it must be assumed that accesses done inside an
+interrupt disabled section may leak outside of it and may interleave with
+accesses performed in an interrupt - and vice versa - unless implicit or
+explicit barriers are used.
+
+Normally this won't be a problem because the I/O accesses done inside such
+sections will include synchronous load operations on strictly ordered I/O
+registers that form implicit I/O barriers. If this isn't sufficient then an
+mmiowb() may need to be used explicitly.
+
+
+A similar situation may occur between an interrupt routine and two routines
+running on separate CPUs that communicate with each other. If such a case is
+likely, then interrupt-disabling locks should be used to guarantee ordering.
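+
+In such a case an interrupt-disabling lock can bound both paths (a sketch
+only; card_lock and the register names are hypothetical, reusing the informal
+writew(register, value) notation from the example above):
+
+	spin_lock_irqsave(&card_lock, flags);
+	writew(ADDR, 3);
+	writew(DATA, y);
+	spin_unlock_irqrestore(&card_lock, flags);
+
+with the interrupt handler taking spin_lock(&card_lock) around its own
+accesses to the card.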
+
+
+==========================
+KERNEL I/O BARRIER EFFECTS
+==========================
+
+When accessing I/O memory, drivers should use the appropriate accessor
+functions:
+
+ (*) inX(), outX():
+
+     These are intended to talk to I/O space rather than memory space, but
+     that's primarily a CPU-specific concept. The i386 and x86_64 processors do
+     indeed have special I/O space access cycles and instructions, but many
+     CPUs don't have such a concept.
+
+     The PCI bus, amongst others, defines an I/O space concept - which on such
+     CPUs as i386 and x86_64 readily maps to the CPU's concept of I/O
+     space.  However, it may also be mapped as a virtual I/O space in the CPU's
+     memory map, particularly on those CPUs that don't support alternate
+     I/O spaces.
+
+     Accesses to this space may be fully synchronous (as on i386), but
+     intermediary bridges (such as the PCI host bridge) may not fully honour
+     that.
+
+     They are guaranteed to be fully ordered with respect to each other.
+
+     They are not guaranteed to be fully ordered with respect to other types of
+     memory and I/O operation.
+
+ (*) readX(), writeX():
+
+     Whether these are guaranteed to be fully ordered and uncombined with
+     respect to each other on the issuing CPU depends on the characteristics
+     defined for the memory window through which they're accessing. On later
+     i386 architecture machines, for example, this is controlled by way of the
+     MTRR registers.
+
+     Ordinarily, these will be guaranteed to be fully ordered and uncombined,
+     provided they're not accessing a prefetchable device.
+
+     However, intermediary hardware (such as a PCI bridge) may indulge in
+     deferral if it so wishes; to flush a store, a load from the same location
+     is preferred[*], but a load from the same device or from configuration
+     space should suffice for PCI.
+
+     [*] NOTE! attempting to load from the same location as was written to may
+     	 cause a malfunction - consider the 16550 Rx/Tx serial registers for
+     	 example.
+
+     Used with prefetchable I/O memory, an mmiowb() barrier may be required to
+     force stores to be ordered.
+
+     Please refer to the PCI specification for more information on interactions
+     between PCI transactions.
+
+ (*) readX_relaxed()
+
+     These are similar to readX(), but are not guaranteed to be ordered in any
+     way. Be aware that there is no I/O read barrier available.
+
+ (*) ioreadX(), iowriteX()
+
+     These will perform as appropriate for the type of access they're actually
+     doing, be it inX()/outX() or readX()/writeX().
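+
+     For instance, a driver might map a PCI BAR with pci_iomap() and let
+     these routines pick the appropriate access type (a sketch only; the BAR
+     number and register offsets are invented):
+
+	void __iomem *regs = pci_iomap(pdev, 0, 0);
+
+	iowrite32(cmd, regs + CMD_REG);
+	(void) ioread32(regs + STATUS_REG);	/* flush the posted write */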
+
+
+========================================
+ASSUMED MINIMUM EXECUTION ORDERING MODEL
+========================================
+
+It has to be assumed that the conceptual CPU is weakly-ordered but that it will
+maintain the appearance of program causality with respect to itself.  Some CPUs
+(such as i386 or x86_64) are more constrained than others (such as powerpc or
+frv), and so the most relaxed case (namely DEC Alpha) must be assumed outside
+of arch-specific code.
+
+This means that it must be considered that the CPU will execute its instruction
+stream in any order it feels like - or even in parallel - provided that if an
+instruction in the stream depends on an earlier instruction, then that
+earlier instruction must be sufficiently complete[*] before the later
+instruction may proceed; in other words: provided that the appearance of
+causality is maintained.
+
+ [*] Some instructions have more than one effect - such as changing the
+     condition codes, changing registers or changing memory - and different
+     instructions may depend on different effects.
+
+A CPU may also discard any instruction sequence that winds up having no
+ultimate effect.  For example, if two adjacent instructions both load an
+immediate value into the same register, the first may be discarded.
+
+
+Similarly, it has to be assumed that the compiler might reorder the instruction
+stream in any way it sees fit, again provided the appearance of causality is
+maintained.
+
+
+============================
+THE EFFECTS OF THE CPU CACHE
+============================
+
+The way cached memory operations are perceived across the system is affected to
+a certain extent by the caches that lie between CPUs and memory, and by the
+memory coherence system that maintains the consistency of state in the system.
+
+As far as the way a CPU interacts with another part of the system through the
+caches goes, the memory system has to include the CPU's caches, and memory
+barriers for the most part act at the interface between the CPU and its cache
+(memory barriers logically act on the dotted line in the following diagram):
+
+	    <--- CPU --->         :       <----------- Memory ----------->
+	                          :
+	+--------+    +--------+  :   +--------+    +-----------+
+	|        |    |        |  :   |        |    |           |    +--------+
+	|  CPU   |    | Memory |  :   | CPU    |    |           |    |	      |
+	|  Core  |--->| Access |----->| Cache  |<-->|           |    |	      |
+	|        |    | Queue  |  :   |        |    |           |--->| Memory |
+	|        |    |        |  :   |        |    |           |    |	      |
+	+--------+    +--------+  :   +--------+    |           |    | 	      |
+	                          :                 | Cache     |    +--------+
+	                          :                 | Coherency |
+	                          :                 | Mechanism |    +--------+
+	+--------+    +--------+  :   +--------+    |           |    |	      |
+	|        |    |        |  :   |        |    |           |    |        |
+	|  CPU   |    | Memory |  :   | CPU    |    |           |--->| Device |
+	|  Core  |--->| Access |----->| Cache  |<-->|           |    | 	      |
+	|        |    | Queue  |  :   |        |    |           |    | 	      |
+	|        |    |        |  :   |        |    |           |    +--------+
+	+--------+    +--------+  :   +--------+    +-----------+
+	                          :
+	                          :
+
+Although any particular load or store may not actually appear outside of the
+CPU that issued it since it may have been satisfied within the CPU's own cache,
+it will still appear as if the full memory access had taken place as far as the
+other CPUs are concerned since the cache coherency mechanisms will migrate the
+cacheline over to the accessing CPU and propagate the effects upon conflict.
+
+The CPU core may execute instructions in any order it deems fit, provided the
+expected program causality appears to be maintained.  Some of the instructions
+generate load and store operations which then go into the queue of memory
+accesses to be performed.  The core may place these in the queue in any order
+it wishes, and continue execution until it is forced to wait for an instruction
+to complete.
+
+What memory barriers are concerned with is controlling the order in which
+accesses cross from the CPU side of things to the memory side of things, and
+the order in which the effects are perceived to happen by the other observers
+in the system.
+
+[!] Memory barriers are _not_ needed within a given CPU, as CPUs always see
+their own loads and stores as if they had happened in program order.
+
+[!] MMIO or other device accesses may bypass the cache system.  This depends on
+the properties of the memory window through which devices are accessed and/or
+the use of any special device communication instructions the CPU may have.
+
+
+CACHE COHERENCY
+---------------
+
+Life isn't quite as simple as it may appear above, however: for while the
+caches are expected to be coherent, there's no guarantee that that coherency
+will be ordered.  This means that whilst changes made on one CPU will
+eventually become visible on all CPUs, there's no guarantee that they will
+become apparent in the same order on those other CPUs.
+
+
+Consider dealing with a system that has a pair of CPUs (1 & 2), each of which has
+a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
+
+	            :
+	            :                          +--------+
+	            :      +---------+         |        |
+	+--------+  : +--->| Cache A |<------->|        |
+	|        |  : |    +---------+         |        |
+	|  CPU 1 |<---+                        |        |
+	|        |  : |    +---------+         |        |
+	+--------+  : +--->| Cache B |<------->|        |
+	            :      +---------+         |        |
+	            :                          | Memory |
+	            :      +---------+         | System |
+	+--------+  : +--->| Cache C |<------->|        |
+	|        |  : |    +---------+         |        |
+	|  CPU 2 |<---+                        |        |
+	|        |  : |    +---------+         |        |
+	+--------+  : +--->| Cache D |<------->|        |
+	            :      +---------+         |        |
+	            :                          +--------+
+	            :
+
+Imagine the system has the following properties:
+
+ (*) an odd-numbered cache line may be in cache A, cache C or it may still be
+     resident in memory;
+
+ (*) an even-numbered cache line may be in cache B, cache D or it may still be
+     resident in memory;
+
+ (*) whilst the CPU core is interrogating one cache, the other cache may be
+     making use of the bus to access the rest of the system - perhaps to
+     displace a dirty cacheline or to do a speculative load;
+
+ (*) each cache has a queue of operations that need to be applied to that cache
+     to maintain coherency with the rest of the system;
+
+ (*) the coherency queue is not flushed by normal loads to lines already
+     present in the cache, even though the contents of the queue may
+     potentially affect those loads.
+
+Imagine, then, that two writes are made on the first CPU, with a write barrier
+between them to guarantee that they will appear to reach that CPU's caches in
+the requisite order:
+
+	CPU 1		CPU 2		COMMENT
+	===============	===============	=======================================
+					u == 0, v == 1 and p == &u, q == &u
+	v = 2;
+	smp_wmb();			Make sure change to v visible before
+					 change to p
+	<A:modify v=2>			v is now in cache A exclusively
+	p = &v;
+	<B:modify p=&v>			p is now in cache B exclusively
+
+The write memory barrier forces the other CPUs in the system to perceive that
+the local CPU's caches have apparently been updated in the correct order.  But
+now imagine that the second CPU wants to read those values:
+
+	CPU 1		CPU 2		COMMENT
+	===============	===============	=======================================
+	...
+			q = p;
+			x = *q;
+
+The above pair of reads may then fail to happen in expected order, as the
+cacheline holding p may get updated in one of the second CPU's caches whilst
+the update to the cacheline holding v is delayed in the other of the second
+CPU's caches by some other cache event:
+
+	CPU 1		CPU 2		COMMENT
+	===============	===============	=======================================
+					u == 0, v == 1 and p == &u, q == &u
+	v = 2;
+	smp_wmb();
+	<A:modify v=2>	<C:busy>
+			<C:queue v=2>
+	p = &v;		q = p;
+			<D:request p>
+	<B:modify p=&v>	<D:commit p=&v>
+		  	<D:read p>
+			x = *q;
+			<C:read *q>	Reads from v before v updated in cache
+			<C:unbusy>
+			<C:commit v=2>
+
+Basically, whilst both cachelines will be updated on CPU 2 eventually, there's
+no guarantee that, without intervention, the order of update will be the same
+as that committed on CPU 1.
+
+
+To intervene, we need to interpolate a data dependency barrier or a read
+barrier between the loads.  This will force the cache to commit its coherency
+queue before processing any further requests:
+
+	CPU 1		CPU 2		COMMENT
+	===============	===============	=======================================
+					u == 0, v == 1 and p == &u, q == &u
+	v = 2;
+	smp_wmb();
+	<A:modify v=2>	<C:busy>
+			<C:queue v=2>
+	p = &v;		q = p;
+			<D:request p>
+	<B:modify p=&v>	<D:commit p=&v>
+		  	<D:read p>
+			smp_read_barrier_depends()
+			<C:unbusy>
+			<C:commit v=2>
+			x = *q;
+			<C:read *q>	Reads from v after v updated in cache
+
+
+This sort of problem can be encountered on DEC Alpha processors as they have a
+split cache that improves performance by making better use of the data bus.
+Whilst most CPUs do imply a data dependency barrier on the read when a memory
+access depends on a read, not all do, so it may not be relied on.
+
+Other CPUs may also have split caches, but must coordinate between the various
+cachelets for normal memory accesses.  The semantics of the Alpha removes the
+need for coordination in the absence of memory barriers.
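+
+In C, the pairing shown in the tables above looks like this (a sketch; p and
+v are as in the example, and smp_wmb()/smp_read_barrier_depends() are the
+kernel's barrier primitives):
+
+	/* CPU 1 (writer) */
+	v = 2;
+	smp_wmb();
+	p = &v;
+
+	/* CPU 2 (reader) */
+	q = p;
+	smp_read_barrier_depends();
+	x = *q;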
+
+
+CACHE COHERENCY VS DMA
+----------------------
+
+Not all systems maintain cache coherency with respect to devices doing DMA.  In
+such cases, a device attempting DMA may obtain stale data from RAM because
+dirty cache lines may be resident in the caches of various CPUs, and may not
+have been written back to RAM yet.  To deal with this, the appropriate part of
+the kernel must flush the overlapping bits of cache on each CPU (and maybe
+invalidate them as well).
+
+In addition, the data DMA'd to RAM by a device may be overwritten by dirty
+cache lines being written back to RAM from a CPU's cache after the device has
+installed its own data, or cache lines present in a CPU's cache may
+simply obscure the fact that RAM has been updated, until at such time as the
+cacheline is discarded from the CPU's cache and reloaded.  To deal with this,
+the appropriate part of the kernel must invalidate the overlapping bits of the
+cache on each CPU.
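+
+The streaming DMA mapping API does this flushing and invalidation on the
+driver's behalf; a minimal sketch (the device, buffer and length are
+hypothetical):
+
+	dma_addr_t handle;
+
+	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+	/* ... the device DMAs into buf ... */
+	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
+	/* the CPU may now safely read buf */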
+
+See Documentation/cachetlb.txt for more information on cache management.
+
+
+CACHE COHERENCY VS MMIO
+-----------------------
+
+Memory mapped I/O usually takes place through memory locations that are part of
+a window in the CPU's memory space that has different properties assigned than
+the usual RAM-directed window.
+
+Amongst these properties is usually the fact that such accesses bypass the
+caching entirely and go directly to the device buses.  This means MMIO accesses
+may, in effect, overtake accesses to cached memory that were emitted earlier.
+A memory barrier isn't sufficient in such a case, but rather the cache must be
+flushed between the cached memory write and the MMIO access if the two are in
+any way dependent.
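+
+A common way to avoid the flush is to keep device-visible descriptors in
+coherent DMA memory and to order the MMIO "doorbell" write explicitly (a
+sketch only; desc, dev_regs and DOORBELL are invented):
+
+	desc->status = DESC_READY;	/* descriptor in coherent DMA memory */
+	wmb();				/* make it visible before the kick */
+	writel(1, dev_regs + DOORBELL);	/* MMIO write to start the device */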
+
+
+=========================
+THE THINGS CPUS GET UP TO
+=========================
+
+A programmer might take it for granted that the CPU will perform memory
+operations in exactly the order specified, so that if a CPU is, for example,
+given the following piece of code to execute:
+
+	a = *A;
+	*B = b;
+	c = *C;
+	d = *D;
+	*E = e;
+
+They would then expect that the CPU will complete the memory operation for each
+instruction before moving on to the next one, leading to a definite sequence of
+operations as seen by external observers in the system:
+
+	LOAD *A, STORE *B, LOAD *C, LOAD *D, STORE *E.
+
+
+Reality is, of course, much messier.  With many CPUs and compilers, the above
+assumption doesn't hold because:
+
+ (*) loads are more likely to need to be completed immediately to permit
+     execution progress, whereas stores can often be deferred without a
+     problem;
+
+ (*) loads may be done speculatively, and the result discarded should it prove
+     to have been unnecessary;
+
+ (*) loads may be done speculatively, leading to the result having been
+     fetched at the wrong time in the expected sequence of events;
+
+ (*) the order of the memory accesses may be rearranged to promote better use
+     of the CPU buses and caches;
+
+ (*) loads and stores may be combined to improve performance when talking to
+     memory or I/O hardware that can do batched accesses of adjacent locations,
+     thus cutting down on transaction setup costs (memory and PCI devices may
+     both be able to do this); and
+
+ (*) the CPU's data cache may affect the ordering, and whilst cache-coherency
+     mechanisms may alleviate this - once the store has actually hit the cache
+     - there's no guarantee that the coherency management will be propagated in
+     order to other CPUs.
+
+So what another CPU, say, might actually observe from the above piece of code
+is:
+
+	LOAD *A, ..., LOAD {*C,*D}, STORE *E, STORE *B
+
+	(Where "LOAD {*C,*D}" is a combined load)
+
+
+However, it is guaranteed that a CPU will be self-consistent: it will see its
+_own_ accesses appear to be correctly ordered, without the need for a memory
+barrier.  For instance with the following code:
+
+	U = *A;
+	*A = V;
+	*A = W;
+	X = *A;
+	*A = Y;
+	Z = *A;
+
+and assuming no intervention by an external influence, it can be assumed that
+the final result will appear to be:
+
+	U == the original value of *A
+	X == W
+	Z == Y
+	*A == Y
+
+The code above may cause the CPU to generate the full sequence of memory
+accesses:
+
+	U=LOAD *A, STORE *A=V, STORE *A=W, X=LOAD *A, STORE *A=Y, Z=LOAD *A
+
+in that order, but, without intervention, the sequence may have almost any
+combination of elements combined or discarded, provided the program's view of
+the world remains consistent.
+
+The compiler may also combine, discard or defer elements of the sequence before
+the CPU even sees them.
+
+For instance:
+
+	*A = V;
+	*A = W;
+
+may be reduced to:
+
+	*A = W;
+
+since, without a write barrier, it can be assumed that the effect of the
+storage of V to *A is lost.  Similarly:
+
+	*A = Y;
+	Z = *A;
+
+may, without a memory barrier, be reduced to:
+
+	*A = Y;
+	Z = Y;
+
+and the LOAD operation never appears outside of the CPU.
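+
+Where such merging or elision must be prevented, a compiler barrier can be
+interposed (a sketch; barrier() is the kernel's compiler barrier):
+
+	*A = V;
+	barrier();	/* the compiler may not merge or discard across this */
+	*A = W;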
+
+
+AND THEN THERE'S THE ALPHA
+--------------------------
+
+The DEC Alpha CPU is one of the most relaxed CPUs there is.  Not only that,
+some versions of the Alpha CPU have a split data cache, permitting them to have
+two semantically related cache lines updating at separate times.  This is where
+the data dependency barrier really becomes necessary as this synchronises both
+caches with the memory coherence system, thus making it seem like pointer
+changes vs new data occur in the right order.
+
+The Alpha defines the Linux kernel's memory barrier model.
+
+See the subsection on "Cache Coherency" above.
+
+
+==========
+REFERENCES
+==========
+
+Alpha AXP Architecture Reference Manual, Second Edition (Sites & Witek,
+Digital Press)
+	Chapter 5.2: Physical Address Space Characteristics
+	Chapter 5.4: Caches and Write Buffers
+	Chapter 5.5: Data Sharing
+	Chapter 5.6: Read/Write Ordering
+
+AMD64 Architecture Programmer's Manual Volume 2: System Programming
+	Chapter 7.1: Memory-Access Ordering
+	Chapter 7.4: Buffering and Combining Memory Writes
+
+IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+System Programming Guide
+	Chapter 7.1: Locked Atomic Operations
+	Chapter 7.2: Memory Ordering
+	Chapter 7.4: Serializing Instructions
+
+The SPARC Architecture Manual, Version 9
+	Chapter 8: Memory Models
+	Appendix D: Formal Specification of the Memory Models
+	Appendix J: Programming with the Memory Models
+
+UltraSPARC Programmer Reference Manual
+	Chapter 5: Memory Accesses and Cacheability
+	Chapter 15: Sparc-V9 Memory Models
+
+UltraSPARC III Cu User's Manual
+	Chapter 9: Memory Models
+
+UltraSPARC IIIi Processor User's Manual
+	Chapter 8: Memory Models
+
+UltraSPARC Architecture 2005
+	Chapter 9: Memory
+	Appendix D: Formal Specifications of the Memory Models
+
+UltraSPARC T1 Supplement to the UltraSPARC Architecture 2005
+	Chapter 8: Memory Models
+	Appendix F: Caches and Cache Coherency
+
+Solaris Internals, Core Kernel Architecture, p63-68:
+	Chapter 3.3: Hardware Considerations for Locks and
+			Synchronization
+
+Unix Systems for Modern Architectures, Symmetric Multiprocessing and Caching
+for Kernel Programmers:
+	Chapter 13: Other Memory Models
+
+Intel Itanium Architecture Software Developer's Manual: Volume 1:
+	Section 2.6: Speculation
+	Section 4.4: Memory Access

+ 36 - 0
Documentation/networking/bcm43xx.txt

@@ -0,0 +1,36 @@
+
+			BCM43xx Linux Driver Project
+			============================
+
+About this software
+-------------------
+
+The goal of this project is to develop a Linux driver for Broadcom
+BCM43xx chips, based on the specification at 
+http://bcm-specs.sipsolutions.net/
+
+The project page is http://bcm43xx.berlios.de/
+
+
+Requirements
+------------
+
+1)	Linux Kernel 2.6.16 or later
+	http://www.kernel.org/
+
+	You may want to configure your kernel with:
+
+	CONFIG_DEBUG_FS (optional):
+		-> Kernel hacking
+		  -> Debug Filesystem
+
+2)	SoftMAC IEEE 802.11 Networking Stack extension and patched ieee80211
+	modules:
+	http://softmac.sipsolutions.net/
+
+3)	Firmware Files
+
+	Please try fwcutter. Fwcutter can extract the firmware from various 
+	binary driver files. It supports driver files from Windows, MacOS and 
+	Linux. You can get fwcutter from http://bcm43xx.berlios.de/.
+	Also, fwcutter comes with a README file for further instructions.

+ 0 - 2
arch/alpha/kernel/alpha_ksyms.c

@@ -216,8 +216,6 @@ EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memchr);
 
-EXPORT_SYMBOL(get_wchan);
-
 #ifdef CONFIG_ALPHA_IRONGATE
 EXPORT_SYMBOL(irongate_ioremap);
 EXPORT_SYMBOL(irongate_iounmap);

+ 1 - 1
arch/alpha/kernel/core_marvel.c

@@ -435,7 +435,7 @@ marvel_specify_io7(char *str)
 		str = pchar;
 	} while(*str);
 
-	return 0;
+	return 1;
 }
 __setup("io7=", marvel_specify_io7);
 

+ 10 - 0
arch/arm/Kconfig

@@ -77,6 +77,14 @@ config FIQ
 config ARCH_MTD_XIP
 	bool
 
+config VECTORS_BASE
+	hex
+	default 0xffff0000 if MMU
+	default DRAM_BASE if REMAP_VECTORS_TO_RAM
+	default 0x00000000
+	help
+	  The base address of exception vectors.
+
 source "init/Kconfig"
 
 menu "System Type"
@@ -839,6 +847,8 @@ source "drivers/misc/Kconfig"
 
 source "drivers/mfd/Kconfig"
 
+source "drivers/leds/Kconfig"
+
 source "drivers/media/Kconfig"
 
 source "drivers/video/Kconfig"

+ 44 - 0
arch/arm/Kconfig-nommu

@@ -0,0 +1,44 @@
+#
+# Kconfig for uClinux(non-paged MM) depend configurations
+# Hyok S. Choi <hyok.choi@samsung.com>
+# 
+
+config SET_MEM_PARAM
+	bool "Set flash/sdram size and base addr"
+	help
+	 Say Y to manually set the base addresses and sizes.
+	 Otherwise, the default values are assigned.
+
+config DRAM_BASE
+	hex '(S)DRAM Base Address' if SET_MEM_PARAM
+	default 0x00800000
+
+config DRAM_SIZE
+	hex '(S)DRAM SIZE' if SET_MEM_PARAM
+	default 0x00800000
+
+config FLASH_MEM_BASE
+	hex 'FLASH Base Address' if SET_MEM_PARAM
+	default 0x00400000
+
+config FLASH_SIZE
+	hex 'FLASH Size' if SET_MEM_PARAM
+	default 0x00400000
+
+config REMAP_VECTORS_TO_RAM
+	bool 'Install vectors to the beginning of RAM' if DRAM_BASE
+	depends on DRAM_BASE
+	help
+	  The kernel needs to change the hardware exception vectors.
+	  In nommu mode, the hardware exception vectors are normally
+	  placed at address 0x00000000. However, this region may be
+	  occupied by read-only memory depending on H/W design.
+
+	  If the region contains read-write memory, say 'n' here.
+
+	  If your CPU provides a remap facility which allows the exception
+	  vectors to be mapped to writable memory, say 'n' here.
+
+	  Otherwise, say 'y' here.  In this case, the kernel will require
+	  external support to redirect the hardware exception vectors to
+	  the writable versions located at DRAM_BASE.

+ 7 - 2
arch/arm/Makefile

@@ -20,6 +20,11 @@ GZFLAGS		:=-9
 # Select a platform tht is kept up-to-date
 KBUILD_DEFCONFIG := versatile_defconfig
 
+# defines the filename extension depending on memory management type.
+ifeq ($(CONFIG_MMU),)
+MMUEXT		:= -nommu
+endif
+
 ifeq ($(CONFIG_FRAME_POINTER),y)
 CFLAGS		+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
 endif
@@ -73,7 +78,7 @@ AFLAGS		+=$(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float
 CHECKFLAGS	+= -D__arm__
 
 #Default value
-head-y		:= arch/arm/kernel/head.o arch/arm/kernel/init_task.o
+head-y		:= arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
 textofs-y	:= 0x00008000
 
  machine-$(CONFIG_ARCH_RPC)	   := rpc
@@ -133,7 +138,7 @@ else
 MACHINE  :=
 endif
   
-export	TEXT_OFFSET GZFLAGS
+export	TEXT_OFFSET GZFLAGS MMUEXT
 
 # Do we have FASTFPE?
 FASTFPE		:=arch/arm/fastfpe

+ 106 - 0
arch/arm/boot/compressed/head.S

@@ -2,6 +2,7 @@
  *  linux/arch/arm/boot/compressed/head.S
  *
  *  Copyright (C) 1996-2002 Russell King
+ *  Copyright (C) 2004 Hyok S. Choi (MPU support)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -320,6 +321,62 @@ params:		ldr	r0, =params_phys
 cache_on:	mov	r3, #8			@ cache_on function
 		b	call_cache_fn
 
+/*
+ * Initialize the highest priority protection region, PR7
+ * to cover all 32bit address and cacheable and bufferable.
+ */
+__armv4_mpu_cache_on:
+		mov	r0, #0x3f		@ 4G, the whole
+		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
+		mcr 	p15, 0, r0, c6, c7, 1
+
+		mov	r0, #0x80		@ PR7
+		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
+		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
+		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
+
+		mov	r0, #0xc000
+		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
+		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
+		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
+		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+						@ ...I .... ..D. WC.M
+		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
+		orr	r0, r0, #0x1000		@ ...1 .... .... ....
+
+		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
+		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
+		mov	pc, lr
+
+__armv3_mpu_cache_on:
+		mov	r0, #0x3f		@ 4G, the whole
+		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
+
+		mov	r0, #0x80		@ PR7
+		mcr	p15, 0, r0, c2, c0, 0	@ cache on
+		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
+
+		mov	r0, #0xc000
+		mcr	p15, 0, r0, c5, c0, 0	@ access permission
+
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+						@ .... .... .... WC.M
+		orr	r0, r0, #0x000d		@ .... .... .... 11.1
+		mov	r0, #0
+		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
+
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mov	pc, lr
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -496,6 +553,18 @@ proc_types:
 		b	__armv4_mmu_cache_off
 		mov	pc, lr
 
+		.word	0x41007400		@ ARM74x
+		.word	0xff00ff00
+		b	__armv3_mpu_cache_on
+		b	__armv3_mpu_cache_off
+		b	__armv3_mpu_cache_flush
+		
+		.word	0x41009400		@ ARM94x
+		.word	0xff00ff00
+		b	__armv4_mpu_cache_on
+		b	__armv4_mpu_cache_off
+		b	__armv4_mpu_cache_flush
+
 		.word	0x00007000		@ ARM7 IDs
 		.word	0x0000f000
 		mov	pc, lr
@@ -562,6 +631,24 @@ proc_types:
 cache_off:	mov	r3, #12			@ cache_off function
 		b	call_cache_fn
 
+__armv4_mpu_cache_off:
+		mrc	p15, 0, r0, c1, c0
+		bic	r0, r0, #0x000d
+		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
+		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
+		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
+		mov	pc, lr
+
+__armv3_mpu_cache_off:
+		mrc	p15, 0, r0, c1, c0
+		bic	r0, r0, #0x000d
+		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
+		mov	r0, #0
+		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
+		mov	pc, lr
+
 __armv4_mmu_cache_off:
 		mrc	p15, 0, r0, c1, c0
 		bic	r0, r0, #0x000d
@@ -601,6 +688,24 @@ cache_clean_flush:
 		mov	r3, #16
 		b	call_cache_fn
 
+__armv4_mpu_cache_flush:
+		mov	r2, #1
+		mov	r3, #0
+		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
+		mov	r1, #7 << 5		@ 8 segments
+1:		orr	r3, r1, #63 << 26	@ 64 entries
+2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
+		subs	r3, r3, #1 << 26
+		bcs	2b			@ entries 63 to 0
+		subs 	r1, r1, #1 << 5
+		bcs	1b			@ segments 7 to 0
+
+		teq	r2, #0
+		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
+		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
+		mov	pc, lr
+		
+
 __armv6_mmu_cache_flush:
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
@@ -638,6 +743,7 @@ no_cache_id:
 		mov	pc, lr
 
 __armv3_mmu_cache_flush:
+__armv3_mpu_cache_flush:
 		mov	r1, #0
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr

+ 8 - 2
arch/arm/common/sharpsl_pm.c

@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/leds.h>
 
 #include <asm/hardware.h>
 #include <asm/mach-types.h>
@@ -75,6 +76,7 @@ static void sharpsl_battery_thread(void *private_);
 struct sharpsl_pm_status sharpsl_pm;
 DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
 DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
 static int get_percentage(int voltage)
@@ -190,10 +192,10 @@ void sharpsl_pm_led(int val)
 		dev_err(sharpsl_pm.dev, "Charging Error!\n");
 	} else if (val == SHARPSL_LED_ON) {
 		dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
-
+		led_trigger_event(sharpsl_charge_led_trigger, LED_FULL);
 	} else {
 		dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
-
+		led_trigger_event(sharpsl_charge_led_trigger, LED_OFF);
 	}
 }
 
@@ -786,6 +788,8 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
 	init_timer(&sharpsl_pm.chrg_full_timer);
 	sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
 
+	led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);
+
 	sharpsl_pm.machinfo->init();
 
 	device_create_file(&pdev->dev, &dev_attr_battery_percentage);
@@ -807,6 +811,8 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
 	device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
 	device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
 
+	led_trigger_unregister_simple(sharpsl_charge_led_trigger);
+
 	sharpsl_pm.machinfo->exit();
 
 	del_timer_sync(&sharpsl_pm.chrg_full_timer);

+ 1 - 1
arch/arm/kernel/entry-armv.S

@@ -666,7 +666,7 @@ __kuser_helper_start:
  *
  * #define __kernel_dmb() \
  *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *	        : : : "lr","cc" )
+ *	        : : : "r0", "lr","cc" )
  */
 
 __kuser_memory_barrier:				@ 0xffff0fa0

+ 217 - 0
arch/arm/kernel/head-common.S

@@ -0,0 +1,217 @@
+/*
+ *  linux/arch/arm/kernel/head-common.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+	.type	__switch_data, %object
+__switch_data:
+	.long	__mmap_switched
+	.long	__data_loc			@ r4
+	.long	__data_start			@ r5
+	.long	__bss_start			@ r6
+	.long	_end				@ r7
+	.long	processor_id			@ r4
+	.long	__machine_arch_type		@ r5
+	.long	cr_alignment			@ r6
+	.long	init_thread_union + THREAD_START_SP @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r9  = processor ID
+ */
+	.type	__mmap_switched, %function
+__mmap_switched:
+	adr	r3, __switch_data + 4
+
+	ldmia	r3!, {r4, r5, r6, r7}
+	cmp	r4, r5				@ Copy data segment if needed
+1:	cmpne	r5, r6
+	ldrne	fp, [r4], #4
+	strne	fp, [r5], #4
+	bne	1b
+
+	mov	fp, #0				@ Clear BSS (and zero fp)
+1:	cmp	r6, r7
+	strcc	fp, [r6],#4
+	bcc	1b
+
+	ldmia	r3, {r4, r5, r6, sp}
+	str	r9, [r4]			@ Save processor ID
+	str	r1, [r5]			@ Save machine type
+	bic	r4, r0, #CR_A			@ Clear 'A' bit
+	stmia	r6, {r0, r4}			@ Save control register values
+	b	start_kernel
+
+/*
+ * Exception handling.  Something went wrong and we can't proceed.  We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+
+	.type	__error_p, %function
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_p1
+	bl	printascii
+	b	__error
+str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
+	.align
+#endif
+
+	.type	__error_a, %function
+__error_a:
+#ifdef CONFIG_DEBUG_LL
+	mov	r4, r1				@ preserve machine ID
+	adr	r0, str_a1
+	bl	printascii
+	mov	r0, r4
+	bl	printhex8
+	adr	r0, str_a2
+	bl	printascii
+	adr	r3, 3f
+	ldmia	r3, {r4, r5, r6}		@ get machine desc list
+	sub	r4, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r4			@ convert virt addresses to
+	add	r6, r6, r4			@ physical address space
+1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
+	bl	printhex8
+	mov	r0, #'\t'
+	bl	printch
+	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
+	add	r0, r0, r4
+	bl	printascii
+	mov	r0, #'\n'
+	bl	printch
+	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
+	cmp	r5, r6
+	blo	1b
+	adr	r0, str_a3
+	bl	printascii
+	b	__error
+str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
+str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
+str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
+	.align
+#endif
+
+	.type	__error, %function
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on a error - RiscPC only.
+ */
+	mov	r0, #0x02000000
+	mov	r3, #0x11
+	orr	r3, r3, r3, lsl #8
+	orr	r3, r3, r3, lsl #16
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+#endif
+1:	mov	r0, r0
+	b	1b
+
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list.  Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space).  We have to
+ * calculate the offset.
+ *
+ *	r9 = cpuid
+ * Returns:
+ *	r3, r4, r6 corrupted
+ *	r5 = proc_info pointer in physical address space
+ *	r9 = cpuid (preserved)
+ */
+	.type	__lookup_processor_type, %function
+__lookup_processor_type:
+	adr	r3, 3f
+	ldmda	r3, {r5 - r7}
+	sub	r3, r3, r7			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldmia	r5, {r3, r4}			@ value, mask
+	and	r4, r4, r9			@ mask wanted bits
+	teq	r3, r4
+	beq	2f
+	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown processor
+2:	mov	pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_processor_type)
+	stmfd	sp!, {r4 - r7, r9, lr}
+	mov	r9, r0
+	bl	__lookup_processor_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r7, r9, pc}
+
+/*
+ * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * more information about the __proc_info and __arch_info structures.
+ */
+	.long	__proc_info_begin
+	.long	__proc_info_end
+3:	.long	.
+	.long	__arch_info_begin
+	.long	__arch_info_end
+
+/*
+ * Lookup machine architecture in the linker-build list of architectures.
+ * Note that we can't use the absolute addresses for the __arch_info
+ * lists since we aren't running with the MMU on (and therefore, we are
+ * not in the correct address space).  We have to calculate the offset.
+ *
+ *  r1 = machine architecture number
+ * Returns:
+ *  r3, r4, r6 corrupted
+ *  r5 = mach_info pointer in physical address space
+ */
+	.type	__lookup_machine_type, %function
+__lookup_machine_type:
+	adr	r3, 3b
+	ldmia	r3, {r4, r5, r6}
+	sub	r3, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
+	teq	r3, r1				@ matches loader number?
+	beq	2f				@ found
+	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown machine
+2:	mov	pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_machine_type)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r1, r0
+	bl	__lookup_machine_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r6, pc}

+ 83 - 0
arch/arm/kernel/head-nommu.S

@@ -0,0 +1,83 @@
+/*
+ *  linux/arch/arm/kernel/head-nommu.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (C) 2003-2006 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Common kernel startup code (non-paged MM)
+ *    for 32-bit CPUs which have a process ID register (CP15).
+ *
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/mach-types.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/constants.h>
+#include <asm/system.h>
+
+#define PROCINFO_INITFUNC       12
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code.  The requirements
+ * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ */
+	__INIT
+	.type	stext, %function
+ENTRY(stext)
+	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+						@ and irqs disabled
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor (r5=0)?
+	beq	__error_p				@ yes, error 'p'
+	bl	__lookup_machine_type		@ r5=machinfo
+	movs	r8, r5				@ invalid machine (r5=0)?
+	beq	__error_a			@ yes, error 'a'
+
+	ldr	r13, __switch_data		@ address to jump to after
+						@ the initialization is done
+	adr	lr, __after_proc_init		@ return (PIC) address
+	add	pc, r10, #PROCINFO_INITFUNC
+
+/*
+ * Set the Control Register and Read the process ID.
+ */
+	.type	__after_proc_init, %function
+__after_proc_init:
+	mrc	p15, 0, r0, c1, c0, 0		@ read control reg
+#ifdef CONFIG_ALIGNMENT_TRAP
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+
+	mov	pc, r13				@ clear the BSS and jump
+						@ to start_kernel
+
+#include "head-common.S"

+ 1 - 206
arch/arm/kernel/head.S

@@ -102,49 +102,6 @@ ENTRY(stext)
 	adr	lr, __enable_mmu		@ return (PIC) address
 	add	pc, r10, #PROCINFO_INITFUNC
 
-	.type	__switch_data, %object
-__switch_data:
-	.long	__mmap_switched
-	.long	__data_loc			@ r4
-	.long	__data_start			@ r5
-	.long	__bss_start			@ r6
-	.long	_end				@ r7
-	.long	processor_id			@ r4
-	.long	__machine_arch_type		@ r5
-	.long	cr_alignment			@ r6
-	.long	init_thread_union + THREAD_START_SP @ sp
-
-/*
- * The following fragment of code is executed with the MMU on, and uses
- * absolute addresses; this is not position independent.
- *
- *  r0  = cp#15 control register
- *  r1  = machine ID
- *  r9  = processor ID
- */
-	.type	__mmap_switched, %function
-__mmap_switched:
-	adr	r3, __switch_data + 4
-
-	ldmia	r3!, {r4, r5, r6, r7}
-	cmp	r4, r5				@ Copy data segment if needed
-1:	cmpne	r5, r6
-	ldrne	fp, [r4], #4
-	strne	fp, [r5], #4
-	bne	1b
-
-	mov	fp, #0				@ Clear BSS (and zero fp)
-1:	cmp	r6, r7
-	strcc	fp, [r6],#4
-	bcc	1b
-
-	ldmia	r3, {r4, r5, r6, sp}
-	str	r9, [r4]			@ Save processor ID
-	str	r1, [r5]			@ Save machine type
-	bic	r4, r0, #CR_A			@ Clear 'A' bit
-	stmia	r6, {r0, r4}			@ Save control register values
-	b	start_kernel
-
 #if defined(CONFIG_SMP)
 	.type   secondary_startup, #function
 ENTRY(secondary_startup)
@@ -367,166 +324,4 @@ __create_page_tables:
 	mov	pc, lr
 	.ltorg
 
-
-
-/*
- * Exception handling.  Something went wrong and we can't proceed.  We
- * ought to tell the user, but since we don't have any guarantee that
- * we're even running on the right architecture, we do virtually nothing.
- *
- * If CONFIG_DEBUG_LL is set we try to print out something about the error
- * and hope for the best (useful if bootloader fails to pass a proper
- * machine ID for example).
- */
-
-	.type	__error_p, %function
-__error_p:
-#ifdef CONFIG_DEBUG_LL
-	adr	r0, str_p1
-	bl	printascii
-	b	__error
-str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
-	.align
-#endif
-
-	.type	__error_a, %function
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-	mov	r4, r1				@ preserve machine ID
-	adr	r0, str_a1
-	bl	printascii
-	mov	r0, r4
-	bl	printhex8
-	adr	r0, str_a2
-	bl	printascii
-	adr	r3, 3f
-	ldmia	r3, {r4, r5, r6}		@ get machine desc list
-	sub	r4, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r4			@ convert virt addresses to
-	add	r6, r6, r4			@ physical address space
-1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-	bl	printhex8
-	mov	r0, #'\t'
-	bl	printch
-	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
-	add	r0, r0, r4
-	bl	printascii
-	mov	r0, #'\n'
-	bl	printch
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	adr	r0, str_a3
-	bl	printascii
-	b	__error
-str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
-	.align
-#endif
-
-	.type	__error, %function
-__error:
-#ifdef CONFIG_ARCH_RPC
-/*
- * Turn the screen red on a error - RiscPC only.
- */
-	mov	r0, #0x02000000
-	mov	r3, #0x11
-	orr	r3, r3, r3, lsl #8
-	orr	r3, r3, r3, lsl #16
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-	str	r3, [r0], #4
-#endif
-1:	mov	r0, r0
-	b	1b
-
-
-/*
- * Read processor ID register (CP#15, CR0), and look up in the linker-built
- * supported processor list.  Note that we can't use the absolute addresses
- * for the __proc_info lists since we aren't running with the MMU on
- * (and therefore, we are not in the correct address space).  We have to
- * calculate the offset.
- *
- *	r9 = cpuid
- * Returns:
- *	r3, r4, r6 corrupted
- *	r5 = proc_info pointer in physical address space
- *	r9 = cpuid (preserved)
- */
-	.type	__lookup_processor_type, %function
-__lookup_processor_type:
-	adr	r3, 3f
-	ldmda	r3, {r5 - r7}
-	sub	r3, r3, r7			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldmia	r5, {r3, r4}			@ value, mask
-	and	r4, r4, r9			@ mask wanted bits
-	teq	r3, r4
-	beq	2f
-	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown processor
-2:	mov	pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_processor_type)
-	stmfd	sp!, {r4 - r7, r9, lr}
-	mov	r9, r0
-	bl	__lookup_processor_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r7, r9, pc}
-
-/*
- * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
- * more information about the __proc_info and __arch_info structures.
- */
-	.long	__proc_info_begin
-	.long	__proc_info_end
-3:	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space).  We have to calculate the offset.
- *
- *  r1 = machine architecture number
- * Returns:
- *  r3, r4, r6 corrupted
- *  r5 = mach_info pointer in physical address space
- */
-	.type	__lookup_machine_type, %function
-__lookup_machine_type:
-	adr	r3, 3b
-	ldmia	r3, {r4, r5, r6}
-	sub	r3, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-	teq	r3, r1				@ matches loader number?
-	beq	2f				@ found
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown machine
-2:	mov	pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
+#include "head-common.S"

+ 0 - 1
arch/arm/kernel/process.c

@@ -474,4 +474,3 @@ unsigned long get_wchan(struct task_struct *p)
 	} while (count ++ < 16);
 	return 0;
 }
-EXPORT_SYMBOL(get_wchan);

+ 1 - 1
arch/arm/kernel/signal.h

@@ -7,6 +7,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define KERN_SIGRETURN_CODE	0xffff0500
+#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
 
 extern const unsigned long sigreturn_codes[7];

+ 5 - 4
arch/arm/kernel/traps.c

@@ -688,6 +688,7 @@ EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
 {
+	unsigned long vectors = CONFIG_VECTORS_BASE;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
 	extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -698,9 +699,9 @@ void __init trap_init(void)
 	 * into the vector page, mapped at 0xffff0000, and ensure these
 	 * are visible to the instruction stream.
 	 */
-	memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
-	memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 
 	/*
 	 * Copy signal return handlers into the vector page, and
@@ -709,6 +710,6 @@ void __init trap_init(void)
 	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
 	       sizeof(sigreturn_codes));
 
-	flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
+	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 }

+ 11 - 0
arch/arm/mach-pxa/corgi.c

@@ -141,6 +141,8 @@ struct corgissp_machinfo corgi_ssp_machinfo = {
  */
 static struct corgibl_machinfo corgi_bl_machinfo = {
 	.max_intensity = 0x2f,
+	.default_intensity = 0x1f,
+	.limit_mask = 0x0b,
 	.set_bl_intensity = corgi_bl_set_intensity,
 };
 
@@ -163,6 +165,14 @@ static struct platform_device corgikbd_device = {
 };
 
 
+/*
+ * Corgi LEDs
+ */
+static struct platform_device corgiled_device = {
+	.name		= "corgi-led",
+	.id		= -1,
+};
+
 /*
  * Corgi Touch Screen Device
  */
@@ -297,6 +307,7 @@ static struct platform_device *devices[] __initdata = {
 	&corgikbd_device,
 	&corgibl_device,
 	&corgits_device,
+	&corgiled_device,
 };
 
 static void __init corgi_init(void)

+ 11 - 0
arch/arm/mach-pxa/spitz.c

@@ -220,6 +220,8 @@ struct corgissp_machinfo spitz_ssp_machinfo = {
  * Spitz Backlight Device
  */
 static struct corgibl_machinfo spitz_bl_machinfo = {
+	.default_intensity = 0x1f,
+	.limit_mask = 0x0b,
 	.max_intensity = 0x2f,
 };
 
@@ -241,6 +243,14 @@ static struct platform_device spitzkbd_device = {
 };
 
 
+/*
+ * Spitz LEDs
+ */
+static struct platform_device spitzled_device = {
+	.name		= "spitz-led",
+	.id		= -1,
+};
+
 /*
  * Spitz Touch Screen Device
  */
@@ -418,6 +428,7 @@ static struct platform_device *devices[] __initdata = {
 	&spitzkbd_device,
 	&spitzts_device,
 	&spitzbl_device,
+	&spitzled_device,
 };
 
 static void __init common_init(void)

+ 9 - 0
arch/arm/mach-pxa/tosa.c

@@ -251,10 +251,19 @@ static struct platform_device tosakbd_device = {
 	.id		= -1,
 };
 
+/*
+ * Tosa LEDs
+ */
+static struct platform_device tosaled_device = {
+    .name   = "tosa-led",
+    .id     = -1,
+};
+
 static struct platform_device *devices[] __initdata = {
 	&tosascoop_device,
 	&tosascoop_jc_device,
 	&tosakbd_device,
+	&tosaled_device,
 };
 
 static void __init tosa_init(void)

+ 1 - 0
arch/arm/mm/proc-xsc3.S

@@ -30,6 +30,7 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include "proc-macros.S"

+ 0 - 2
arch/arm26/kernel/armksyms.c

@@ -212,8 +212,6 @@ EXPORT_SYMBOL(sys_open);
 EXPORT_SYMBOL(sys_exit);
 EXPORT_SYMBOL(sys_wait4);
 
-EXPORT_SYMBOL(get_wchan);
-
 #ifdef CONFIG_PREEMPT
 EXPORT_SYMBOL(kernel_flag);
 #endif

+ 0 - 2
arch/frv/kernel/frv_ksyms.c

@@ -79,8 +79,6 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(__outsl_ns);
 EXPORT_SYMBOL(__insl_ns);
 
-EXPORT_SYMBOL(get_wchan);
-
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
 EXPORT_SYMBOL(atomic_test_and_OR_mask);

+ 0 - 2
arch/h8300/kernel/h8300_ksyms.c

@@ -55,8 +55,6 @@ EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memscan);
 EXPORT_SYMBOL(memmove);
 
-EXPORT_SYMBOL(get_wchan);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler...  (prototypes are not correct though, but that

+ 21 - 1
arch/i386/kernel/apic.c

@@ -415,6 +415,7 @@ void __init init_bsp_APIC(void)
 void __devinit setup_local_APIC(void)
 {
 	unsigned long oldvalue, value, ver, maxlvt;
+	int i, j;
 
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
 	if (esr_disable) {
@@ -451,6 +452,25 @@ void __devinit setup_local_APIC(void)
 	value &= ~APIC_TPRI_MASK;
 	apic_write_around(APIC_TASKPRI, value);
 
+	/*
+	 * After a crash, we no longer service the interrupts and a pending
+	 * interrupt from previous kernel might still have ISR bit set.
+	 *
+	 * Most probably by now CPU has serviced that pending interrupt and
+	 * it might not have done the ack_APIC_irq() because it thought,
+	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+	 * does not clear the ISR bit and cpu thinks it has already serviced
+	 * the interrupt. Hence a vector might get locked. It was noticed
+	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+	 */
+	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+		value = apic_read(APIC_ISR + i*0x10);
+		for (j = 31; j >= 0; j--) {
+			if (value & (1<<j))
+				ack_APIC_irq();
+		}
+	}
+
 	/*
 	 * Now that we are all set up, enable the APIC
 	 */
@@ -732,7 +752,7 @@ static int __init apic_set_verbosity(char *str)
 		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
 				" use apic=verbose or apic=debug\n", str);
 
-	return 0;
+	return 1;
 }
 
 __setup("apic=", apic_set_verbosity);

+ 2 - 2
arch/i386/kernel/cpu/mcheck/mce.c

@@ -64,13 +64,13 @@ void mcheck_init(struct cpuinfo_x86 *c)
 static int __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
-	return 0;
+	return 1;
 }
 
 static int __init mcheck_enable(char *str)
 {
 	mce_disabled = -1;
-	return 0;
+	return 1;
 }
 
 __setup("nomce", mcheck_disable);

+ 1 - 1
arch/i386/kernel/io_apic.c

@@ -644,7 +644,7 @@ failed:
 int __init irqbalance_disable(char *str)
 {
 	irqbalance_disabled = 1;
-	return 0;
+	return 1;
 }
 
 __setup("noirqbalance", irqbalance_disable);

+ 0 - 1
arch/i386/kernel/process.c

@@ -781,7 +781,6 @@ unsigned long get_wchan(struct task_struct *p)
 	} while (count++ < 16);
 	return 0;
 }
-EXPORT_SYMBOL(get_wchan);
 
 /*
  * sys_alloc_thread_area: get a yet unused TLS descriptor index.

+ 2 - 0
arch/i386/kernel/syscall_table.S

@@ -312,3 +312,5 @@ ENTRY(sys_call_table)
 	.long sys_unshare		/* 310 */
 	.long sys_set_robust_list
 	.long sys_get_robust_list
+	.long sys_splice
+	.long sys_sync_file_range

+ 1 - 1
arch/i386/kernel/traps.c

@@ -1193,6 +1193,6 @@ void __init trap_init(void)
 static int __init kstack_setup(char *s)
 {
 	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-	return 0;
+	return 1;
 }
 __setup("kstack=", kstack_setup);

+ 1 - 1
arch/i386/kernel/vsyscall-sigreturn.S

@@ -44,7 +44,7 @@ __kernel_rt_sigreturn:
 .LSTARTCIEDLSI1:
 	.long 0			/* CIE ID */
 	.byte 1			/* Version number */
-	.string "zR"		/* NUL-terminated augmentation string */
+	.string "zRS"		/* NUL-terminated augmentation string */
 	.uleb128 1		/* Code alignment factor */
 	.sleb128 -4		/* Data alignment factor */
 	.byte 8			/* Return address register column */

+ 1 - 0
arch/ia64/kernel/entry.S

@@ -1605,5 +1605,6 @@ sys_call_table:
 	data8 sys_ni_syscall			// reserved for pselect
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
+	data8 sys_splice
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls

+ 1 - 0
arch/ia64/kernel/gate.lds.S

@@ -59,6 +59,7 @@ SECTIONS
 	*(.dynbss)
 	*(.bss .bss.* .gnu.linkonce.b.*)
 	*(__ex_table)
+	*(__mca_table)
   }
 }
 

+ 162 - 103
arch/ia64/kernel/iosapic.c

@@ -9,54 +9,65 @@
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
  *
- * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O APIC code.
- *				In particular, we now have separate handlers for edge
- *				and level triggered interrupts.
- * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector allocation
- *				PCI to vector mapping, shared PCI interrupts.
- * 00/10/27	D. Mosberger	Document things a bit more to make them more understandable.
- *				Clean up much of the old IOSAPIC cruft.
- * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts and fixes for
- *				ACPI S5(SoftOff) support.
+ * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
+ *				APIC code.  In particular, we now have separate
+ *				handlers for edge and level triggered
+ *				interrupts.
+ * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
+ *				allocation PCI to vector mapping, shared PCI
+ *				interrupts.
+ * 00/10/27	D. Mosberger	Document things a bit more to make them more
+ *				understandable.  Clean up much of the old
+ *				IOSAPIC cruft.
+ * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
+ *				and fixes for ACPI S5(SoftOff) support.
  * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07     E. Focht        <efocht@ess.nec.de> Redirectable interrupt vectors in
- *                              iosapic_set_affinity(), initializations for
- *                              /proc/irq/#/smp_affinity
+ * 02/01/07     E. Focht        <efocht@ess.nec.de> Redirectable interrupt
+ *				vectors in iosapic_set_affinity(),
+ *				initializations for /proc/irq/#/smp_affinity
  * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
  * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
- * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping
- *				error
+ * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
+ *				IOSAPIC mapping error
  * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
- * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system interrupt, vector, etc.)
- * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's pci_irq code.
+ * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
+ *				interrupt, vector, etc.)
+ * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
+ *				pci_irq code.
  * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
- *				Remove iosapic_address & gsi_base from external interfaces.
- *				Rationalize __init/__devinit attributes.
+ *				Remove iosapic_address & gsi_base from
+ *				external interfaces.  Rationalize
+ *				__init/__devinit attributes.
  * 04/12/04 Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
- *				Updated to work with irq migration necessary for CPU Hotplug
+ *				Updated to work with irq migration necessary
+ *				for CPU Hotplug
  */
 /*
- * Here is what the interrupt logic between a PCI device and the kernel looks like:
+ * Here is what the interrupt logic between a PCI device and the kernel looks
+ * like:
  *
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD).  The
- *     device is uniquely identified by its bus--, and slot-number (the function
- *     number does not matter here because all functions share the same interrupt
- *     lines).
+ * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
+ *     INTD).  The device is uniquely identified by its bus-, and slot-number
+ *     (the function number does not matter here because all functions share
+ *     the same interrupt lines).
  *
- * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller.
- *     Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
- *     triggered and use the same polarity).  Each interrupt line has a unique Global
- *     System Interrupt (GSI) number which can be calculated as the sum of the controller's
- *     base GSI number and the IOSAPIC pin number to which the line connects.
+ * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC
+ *     controller.  Multiple interrupt lines may have to share the same
+ *     IOSAPIC pin (if they're level triggered and use the same polarity).
+ *     Each interrupt line has a unique Global System Interrupt (GSI) number
+ *     which can be calculated as the sum of the controller's base GSI number
+ *     and the IOSAPIC pin number to which the line connects.
  *
- * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the IOSAPIC pin
- *     into the IA-64 interrupt vector.  This interrupt vector is then sent to the CPU.
+ * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the
+ * IOSAPIC pin into the IA-64 interrupt vector.  This interrupt vector is then
+ * sent to the CPU.
  *
- * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is used as
- *     architecture-independent interrupt handling mechanism in Linux.  As an
- *     IRQ is a number, we have to have IA-64 interrupt vector number <-> IRQ number
- *     mapping.  On smaller systems, we use one-to-one mapping between IA-64 vector and
- *     IRQ.  A platform can implement platform_irq_to_vector(irq) and
+ * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
+ *     used as architecture-independent interrupt handling mechanism in Linux.
+ *     As an IRQ is a number, we have to have
+ *     IA-64 interrupt vector number <-> IRQ number mapping.  On smaller
+ *     systems, we use one-to-one mapping between IA-64 vector and IRQ.  A
+ *     platform can implement platform_irq_to_vector(irq) and
  *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
  *     Please see also include/asm-ia64/hw_irq.h for those APIs.
  *
@@ -64,9 +75,9 @@
  *
  *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
  *
- * Note: The term "IRQ" is loosely used everywhere in Linux kernel to describe interrupts.
- * Now we use "IRQ" only for Linux IRQ's.  ISA IRQ (isa_irq) is the only exception in this
- * source code.
+ * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
+ * describe interrupts.  Now we use "IRQ" only for Linux IRQ's.  ISA IRQ
+ * (isa_irq) is the only exception in this source code.
  */
 #include <linux/config.h>
 
@@ -90,7 +101,6 @@
 #include <asm/ptrace.h>
 #include <asm/system.h>
 
-
 #undef DEBUG_INTERRUPT_ROUTING
 
 #ifdef DEBUG_INTERRUPT_ROUTING
@@ -99,36 +109,46 @@
 #define DBG(fmt...)
 #endif
 
-#define NR_PREALLOCATE_RTE_ENTRIES	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define NR_PREALLOCATE_RTE_ENTRIES \
+	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
 #define RTE_PREALLOCATED	(1)
 
 static DEFINE_SPINLOCK(iosapic_lock);
 
-/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
+/*
+ * These tables map IA-64 vectors to the IOSAPIC pin that generates this
+ * vector.
+ */
 
 struct iosapic_rte_info {
-	struct list_head rte_list;	/* node in list of RTEs sharing the same vector */
+	struct list_head rte_list;	/* node in list of RTEs sharing the
+					 * same vector */
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
+	unsigned int	gsi_base;	/* first GSI assigned to this
+					 * IOSAPIC */
 	char		rte_index;	/* IOSAPIC RTE index */
 	int		refcnt;		/* reference counter */
 	unsigned int	flags;		/* flags */
 } ____cacheline_aligned;
 
 static struct iosapic_intr_info {
-	struct list_head rtes;		/* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+	struct list_head rtes;		/* RTEs using this vector (empty =>
+					 * not an IOSAPIC interrupt) */
 	int		count;		/* # of RTEs that shares this vector */
-	u32		low32;		/* current value of low word of Redirection table entry */
+	u32		low32;		/* current value of low word of
+					 * Redirection table entry */
 	unsigned int	dest;		/* destination CPU physical ID */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
-	unsigned char 	polarity: 1;	/* interrupt polarity (see iosapic.h) */
+	unsigned char 	polarity: 1;	/* interrupt polarity
+					 * (see iosapic.h) */
 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
 } iosapic_intr_info[IA64_NUM_VECTORS];
 
 static struct iosapic {
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int 	gsi_base;	/* first GSI assigned to this IOSAPIC */
-	unsigned short 	num_rte;	/* number of RTE in this IOSAPIC */
+	unsigned int 	gsi_base;	/* first GSI assigned to this
+					 * IOSAPIC */
+	unsigned short 	num_rte;	/* # of RTEs on this IOSAPIC */
 	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
 #ifdef CONFIG_NUMA
 	unsigned short	node;		/* numa node association via pxm */
@@ -149,7 +169,8 @@ find_iosapic (unsigned int gsi)
 	int i;
 
 	for (i = 0; i < NR_IOSAPICS; i++) {
-		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte)
+		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
+		    iosapic_lists[i].num_rte)
 			return i;
 	}
 
@@ -162,7 +183,8 @@ _gsi_to_vector (unsigned int gsi)
 	struct iosapic_intr_info *info;
 	struct iosapic_rte_info *rte;
 
-	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
+	for (info = iosapic_intr_info; info <
+		     iosapic_intr_info + IA64_NUM_VECTORS; ++info)
 		list_for_each_entry(rte, &info->rtes, rte_list)
 			if (rte->gsi_base + rte->rte_index == gsi)
 				return info - iosapic_intr_info;
@@ -185,8 +207,8 @@ gsi_to_irq (unsigned int gsi)
 	unsigned long flags;
 	int irq;
 	/*
-	 * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq
-	 * numbers...
+	 * XXX fix me: this assumes an identity mapping between IA-64 vector
+	 * and Linux irq numbers...
 	 */
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
@@ -197,7 +219,8 @@ gsi_to_irq (unsigned int gsi)
 	return irq;
 }
 
-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi,
+						  unsigned int vec)
 {
 	struct iosapic_rte_info *rte;
 
@@ -237,7 +260,9 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 
 		for (irq = 0; irq < NR_IRQS; ++irq)
 			if (irq_to_vector(irq) == vector) {
-				set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
+				set_irq_affinity_info(irq,
+						      (int)(dest & 0xffff),
+						      redir);
 				break;
 			}
 	}
@@ -259,7 +284,7 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 }
 
 static void
-nop (unsigned int vector)
+nop (unsigned int irq)
 {
 	/* do nothing... */
 }
@@ -281,7 +306,8 @@ mask_irq (unsigned int irq)
 	{
 		/* set only the mask bit */
 		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -306,7 +332,8 @@ unmask_irq (unsigned int irq)
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -346,21 +373,25 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
-		low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
+		low32 = iosapic_intr_info[vec].low32 &
+			~(7 << IOSAPIC_DELIVERY_SHIFT);
 
 		if (redir)
 		        /* change delivery mode to lowest priority */
-			low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+			low32 |= (IOSAPIC_LOWEST_PRIORITY <<
+				  IOSAPIC_DELIVERY_SHIFT);
 		else
 		        /* change delivery mode to fixed */
 			low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
 
 		iosapic_intr_info[vec].low32 = low32;
 		iosapic_intr_info[vec].dest = dest;
-		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+				    rte_list) {
 			addr = rte->addr;
 			rte_index = rte->rte_index;
-			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
+				      high32);
 			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
 		}
 	}
@@ -433,7 +464,8 @@ iosapic_ack_edge_irq (unsigned int irq)
 	 * interrupt for real. This prevents IRQ storms from unhandled
 	 * devices.
 	 */
-	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
+	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
+	    (IRQ_PENDING|IRQ_DISABLED))
 		mask_irq(irq);
 }
 
@@ -467,7 +499,8 @@ iosapic_version (char __iomem *addr)
 	return iosapic_read(addr, IOSAPIC_VERSION);
 }
 
-static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+static int iosapic_find_sharable_vector (unsigned long trigger,
+					 unsigned long pol)
 {
 	int i, vector = -1, min_count = -1;
 	struct iosapic_intr_info *info;
@@ -482,7 +515,8 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po
 	for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
 		info = &iosapic_intr_info[i];
 		if (info->trigger == trigger && info->polarity == pol &&
-		    (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+		    (info->dmode == IOSAPIC_FIXED || info->dmode ==
+		     IOSAPIC_LOWEST_PRIORITY)) {
 			if (min_count == -1 || info->count < min_count) {
 				vector = i;
 				min_count = info->count;
@@ -506,12 +540,15 @@ iosapic_reassign_vector (int vector)
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
 		if (new_vector < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
-		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
+		printk(KERN_INFO "Reassigning vector %d to %d\n",
+		       vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
 		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
-		list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
-		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+		list_move(iosapic_intr_info[vector].rtes.next,
+			  &iosapic_intr_info[new_vector].rtes);
+		memset(&iosapic_intr_info[vector], 0,
+		       sizeof(struct iosapic_intr_info));
 		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
 		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 	}
@@ -524,7 +561,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
 	int preallocated = 0;
 
 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
+				    NR_PREALLOCATE_RTE_ENTRIES);
 		if (!rte)
 			return NULL;
 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
@@ -532,7 +570,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
 	}
 
 	if (!list_empty(&free_rte_list)) {
-		rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
+				 rte_list);
 		list_del(&rte->rte_list);
 		preallocated++;
 	} else {
@@ -575,7 +614,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 
 	index = find_iosapic(gsi);
 	if (index < 0) {
-		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
+		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
+		       __FUNCTION__, gsi);
 		return -ENODEV;
 	}
 
@@ -586,7 +626,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	if (!rte) {
 		rte = iosapic_alloc_rte();
 		if (!rte) {
-			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+			printk(KERN_WARNING "%s: cannot allocate memory\n",
+			       __FUNCTION__);
 			return -ENOMEM;
 		}
 
@@ -602,7 +643,9 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	else if (vector_is_shared(vector)) {
 		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
 		if (info->trigger != trigger || info->polarity != polarity) {
-			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+			printk (KERN_WARNING
+				"%s: cannot override the interrupt\n",
+				__FUNCTION__);
 			return -EINVAL;
 		}
 	}
@@ -619,8 +662,10 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	idesc = irq_descp(vector);
 	if (idesc->handler != irq_type) {
 		if (idesc->handler != &no_irq_type)
-			printk(KERN_WARNING "%s: changing vector %d from %s to %s\n",
-			       __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
+			printk(KERN_WARNING
+			       "%s: changing vector %d from %s to %s\n",
+			       __FUNCTION__, vector,
+			       idesc->handler->typename, irq_type->typename);
 		idesc->handler = irq_type;
 	}
 	return 0;
@@ -681,7 +726,7 @@ get_target_cpu (unsigned int gsi, int vector)
 		if (!num_cpus)
 			goto skip_numa_setup;
 
-		/* Use vector assigment to distribute across cpus in node */
+		/* Use vector assignment to distribute across cpus in node */
 		cpu_index = vector % num_cpus;
 
 		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
@@ -703,7 +748,7 @@ skip_numa_setup:
 	} while (!cpu_online(cpu));
 
 	return cpu_physical_id(cpu);
-#else
+#else  /* CONFIG_SMP */
 	return cpu_physical_id(smp_processor_id());
 #endif
 }
@@ -755,7 +800,8 @@ again:
 			if (list_empty(&iosapic_intr_info[vector].rtes))
 				free_irq_vector(vector);
 			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock,
+					       flags);
 			goto again;
 		}
 
@@ -764,7 +810,8 @@ again:
 			      polarity, trigger);
 		if (err < 0) {
 			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock,
+					       flags);
 			return err;
 		}
 
@@ -806,7 +853,8 @@ iosapic_unregister_intr (unsigned int gsi)
 	 */
 	irq = gsi_to_irq(gsi);
 	if (irq < 0) {
-		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
+		       gsi);
 		WARN_ON(1);
 		return;
 	}
@@ -817,7 +865,9 @@ iosapic_unregister_intr (unsigned int gsi)
 	spin_lock(&iosapic_lock);
 	{
 		if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
-			printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+			printk(KERN_ERR
+			       "iosapic_unregister_intr(%u) unbalanced\n",
+			       gsi);
 			WARN_ON(1);
 			goto out;
 		}
@@ -827,7 +877,8 @@ iosapic_unregister_intr (unsigned int gsi)
 
 		/* Mask the interrupt */
 		low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
-		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
+		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
+			      low32);
 
 		/* Remove the rte entry from the list */
 		list_del(&rte->rte_list);
@@ -840,7 +891,9 @@ iosapic_unregister_intr (unsigned int gsi)
 		trigger	 = iosapic_intr_info[vector].trigger;
 		polarity = iosapic_intr_info[vector].polarity;
 		dest     = iosapic_intr_info[vector].dest;
-		printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+		printk(KERN_INFO
+		       "GSI %u (%s, %s) -> CPU %d (0x%04x)"
+		       " vector %d unregistered\n",
 		       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 		       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 		       cpu_logical_id(dest), dest, vector);
@@ -853,12 +906,15 @@ iosapic_unregister_intr (unsigned int gsi)
 			idesc->handler = &no_irq_type;
 
 			/* Clear the interrupt information */
-			memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+			memset(&iosapic_intr_info[vector], 0,
+			       sizeof(struct iosapic_intr_info));
 			iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
 			INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 
 			if (idesc->action) {
-				printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
+				printk(KERN_ERR
+				       "interrupt handlers still exist on"
+				       "IRQ %u\n", irq);
 				WARN_ON(1);
 			}
 
@@ -873,7 +929,6 @@ iosapic_unregister_intr (unsigned int gsi)
 
 /*
  * ACPI calls this when it finds an entry for a platform interrupt.
- * Note that the irq_base and IOSAPIC address must be set in iosapic_init().
  */
 int __init
 iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
@@ -907,13 +962,16 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		mask = 1;
 		break;
 	      default:
-		printk(KERN_ERR "iosapic_register_platform_irq(): invalid int type 0x%x\n", int_type);
+		printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+		       int_type);
 		return -1;
 	}
 
 	register_intr(gsi, vector, delivery, polarity, trigger);
 
-	printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
+	printk(KERN_INFO
+	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
+	       " vector %d\n",
 	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
 	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
@@ -923,10 +981,8 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	return vector;
 }
 
-
 /*
  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
- * Note that the gsi_base and IOSAPIC address must be set in iosapic_init().
  */
 void __init
 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
@@ -955,16 +1011,19 @@ iosapic_system_init (int system_pcat_compat)
 
 	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
 		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
-		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);	/* mark as unused */
+		/* mark as unused */
+		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 	}
 
 	pcat_compat = system_pcat_compat;
 	if (pcat_compat) {
 		/*
-		 * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
-		 * enabled.
+		 * Disable the compatibility mode interrupts (8259 style),
+		 * needs IN/OUT support enabled.
 		 */
-		printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
+		printk(KERN_INFO
+		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
+		       __FUNCTION__);
 		outb(0xff, 0xA1);
 		outb(0xff, 0x21);
 	}
@@ -1004,10 +1063,7 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
 		base = iosapic_lists[index].gsi_base;
 		end  = base + iosapic_lists[index].num_rte - 1;
 
-		if (gsi_base < base && gsi_end < base)
-			continue;/* OK */
-
-		if (gsi_base > end && gsi_end > end)
+		if (gsi_end < base || end < gsi_base)
 			continue; /* OK */
 
 		return -EBUSY;
@@ -1053,12 +1109,14 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 
 	if ((gsi_base == 0) && pcat_compat) {
 		/*
-		 * Map the legacy ISA devices into the IOSAPIC data.  Some of these may
-		 * get reprogrammed later on with data from the ACPI Interrupt Source
-		 * Override table.
+		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
+		 * these may get reprogrammed later on with data from the ACPI
+		 * Interrupt Source Override table.
 		 */
 		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
-			iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
+			iosapic_override_isa_irq(isa_irq, isa_irq,
+						 IOSAPIC_POL_HIGH,
+						 IOSAPIC_EDGE);
 	}
 	return 0;
 }
@@ -1081,7 +1139,8 @@ iosapic_remove (unsigned int gsi_base)
 
 		if (iosapic_lists[index].rtes_inuse) {
 			err = -EBUSY;
-			printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
+			printk(KERN_WARNING
+			       "%s: IOSAPIC for GSI base %u is busy\n",
 			       __FUNCTION__, gsi_base);
 			goto out;
 		}
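
The comment block added above describes the chain PCI pin -> GSI -> IA-64
vector <-> IRQ, with GSI = gsi_base + pin for the IOSAPIC that serves the
line.  The following standalone sketch (not part of this commit; the toy_*
names and the two example controllers are invented) illustrates that
arithmetic and the unsigned range test used by find_iosapic():

	#include <stdio.h>

	struct toy_iosapic {
		unsigned int gsi_base;	/* first GSI served by this controller */
		unsigned int num_rte;	/* number of RTEs (pins) it provides */
	};

	static struct toy_iosapic toy_list[] = {
		{ .gsi_base = 0,  .num_rte = 24 },
		{ .gsi_base = 24, .num_rte = 16 },
	};

	/* in range iff (gsi - gsi_base) < num_rte when evaluated unsigned,
	 * the same trick the patched find_iosapic() relies on */
	static int toy_find_iosapic(unsigned int gsi)
	{
		unsigned int i;

		for (i = 0; i < 2; i++)
			if (gsi - toy_list[i].gsi_base < toy_list[i].num_rte)
				return i;
		return -1;
	}

	int main(void)
	{
		unsigned int gsi = 30;	/* pin 6 on the second controller */
		int idx = toy_find_iosapic(gsi);

		printf("GSI %u -> controller %d, pin %u\n",
		       gsi, idx, gsi - toy_list[idx].gsi_base);
		return 0;
	}

Built as a user-space program this prints "GSI 30 -> controller 1, pin 6",
matching the "sum of the controller's base GSI number and the IOSAPIC pin
number" rule stated in the comment.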

+ 4 - 4
arch/ia64/kernel/palinfo.c

@@ -240,7 +240,7 @@ cache_info(char *page)
 			}
 			p += sprintf(p,
 				     "%s Cache level %lu:\n"
-				     "\tSize           : %lu bytes\n"
+				     "\tSize           : %u bytes\n"
 				     "\tAttributes     : ",
 				     cache_types[j+cci.pcci_unified], i+1,
 				     cci.pcci_cache_size);
@@ -648,9 +648,9 @@ frequency_info(char *page)
 	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
 
 	p += sprintf(p,
-		     "Processor/Clock ratio   : %ld/%ld\n"
-		     "Bus/Clock ratio         : %ld/%ld\n"
-		     "ITC/Clock ratio         : %ld/%ld\n",
+		     "Processor/Clock ratio   : %d/%d\n"
+		     "Bus/Clock ratio         : %d/%d\n"
+		     "ITC/Clock ratio         : %d/%d\n",
 		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
 
 	return p - page;

+ 1 - 1
arch/ia64/kernel/time.c

@@ -188,7 +188,7 @@ ia64_init_itm (void)
 	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
 
 	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
-	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
 	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
 	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
 	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

+ 367 - 0
arch/ia64/kernel/topology.c

@@ -9,6 +9,8 @@
  * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
  * Populate cpu entries in sysfs for non-numa systems as well
  *  	Intel Corporation - Ashok Raj
+ * 02/27/2006 Zhang, Yanmin
+ *	Populate cpu cache entries in sysfs for cpu cache info
  */
 
 #include <linux/config.h>
@@ -19,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
+#include <linux/notifier.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>
@@ -101,3 +104,367 @@ out:
 }
 
 subsys_initcall(topology_init);
+
+
+/*
+ * Export cpu cache information through sysfs
+ */
+
+/*
+ *  A bunch of string array to get pretty printing
+ */
+static const char *cache_types[] = {
+	"",			/* not used */
+	"Instruction",
+	"Data",
+	"Unified"	/* unified */
+};
+
+static const char *cache_mattrib[]={
+	"WriteThrough",
+	"WriteBack",
+	"",		/* reserved */
+	""		/* reserved */
+};
+
+struct cache_info {
+	pal_cache_config_info_t	cci;
+	cpumask_t shared_cpu_map;
+	int level;
+	int type;
+	struct kobject kobj;
+};
+
+struct cpu_cache_info {
+	struct cache_info *cache_leaves;
+	int	num_cache_leaves;
+	struct kobject kobj;
+};
+
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
+#define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
+
+#ifdef CONFIG_SMP
+static void cache_shared_cpu_map_setup( unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	pal_cache_shared_info_t	csi;
+	int num_shared, i = 0;
+	unsigned int j;
+
+	if (cpu_data(cpu)->threads_per_core <= 1 &&
+		cpu_data(cpu)->cores_per_socket <= 1) {
+		cpu_set(cpu, this_leaf->shared_cpu_map);
+		return;
+	}
+
+	if (ia64_pal_cache_shared_info(this_leaf->level,
+					this_leaf->type,
+					0,
+					&csi) != PAL_STATUS_SUCCESS)
+		return;
+
+	num_shared = (int) csi.num_shared;
+	do {
+		for_each_cpu(j)
+			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
+				&& cpu_data(j)->core_id == csi.log1_cid
+				&& cpu_data(j)->thread_id == csi.log1_tid)
+				cpu_set(j, this_leaf->shared_cpu_map);
+
+		i++;
+	} while (i < num_shared &&
+		ia64_pal_cache_shared_info(this_leaf->level,
+				this_leaf->type,
+				i,
+				&csi) == PAL_STATUS_SUCCESS);
+}
+#else
+static void cache_shared_cpu_map_setup(unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	cpu_set(cpu, this_leaf->shared_cpu_map);
+	return;
+}
+#endif
+
+static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
+}
+
+static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
+}
+
+static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf,
+			"%s\n",
+			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
+}
+
+static ssize_t show_size(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
+}
+
+static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
+{
+	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
+	number_of_sets /= this_leaf->cci.pcci_assoc;
+	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
+
+	return sprintf(buf, "%u\n", number_of_sets);
+}
+
+static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
+{
+	ssize_t	len;
+	cpumask_t shared_cpu_map;
+
+	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+static ssize_t show_type(struct cache_info *this_leaf, char *buf)
+{
+	int type = this_leaf->type + this_leaf->cci.pcci_unified;
+	return sprintf(buf, "%s\n", cache_types[type]);
+}
+
+static ssize_t show_level(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->level);
+}
+
+struct cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct cache_info *, char *);
+	ssize_t (*store)(struct cache_info *, const char *, size_t count);
+};
+
+#ifdef define_one_ro
+	#undef define_one_ro
+#endif
+#define define_one_ro(_name) \
+	static struct cache_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(ways_of_associativity);
+define_one_ro(size);
+define_one_ro(number_of_sets);
+define_one_ro(shared_cpu_map);
+define_one_ro(attributes);
+
+static struct attribute * cache_default_attrs[] = {
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&ways_of_associativity.attr,
+	&attributes.attr,
+	&size.attr,
+	&number_of_sets.attr,
+	&shared_cpu_map.attr,
+	NULL
+};
+
+#define to_object(k) container_of(k, struct cache_info, kobj)
+#define to_attr(a) container_of(a, struct cache_attr, attr)
+
+static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct cache_attr *fattr = to_attr(attr);
+	struct cache_info *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
+	return ret;
+}
+
+static struct sysfs_ops cache_sysfs_ops = {
+	.show   = cache_show
+};
+
+static struct kobj_type cache_ktype = {
+	.sysfs_ops	= &cache_sysfs_ops,
+	.default_attrs	= cache_default_attrs,
+};
+
+static struct kobj_type cache_ktype_percpu_entry = {
+	.sysfs_ops	= &cache_sysfs_ops,
+};
+
+static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	if (all_cpu_cache_info[cpu].cache_leaves) {
+		kfree(all_cpu_cache_info[cpu].cache_leaves);
+		all_cpu_cache_info[cpu].cache_leaves = NULL;
+	}
+	all_cpu_cache_info[cpu].num_cache_leaves = 0;
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return;
+}
+
+static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+{
+	u64 i, levels, unique_caches;
+	pal_cache_config_info_t cci;
+	int j;
+	s64 status;
+	struct cache_info *this_cache;
+	int num_cache_leaves = 0;
+
+	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
+		return -1;
+	}
+
+	this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
+			GFP_KERNEL);
+	if (this_cache == NULL)
+		return -ENOMEM;
+
+	for (i=0; i < levels; i++) {
+		for (j=2; j >0 ; j--) {
+			if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
+					PAL_STATUS_SUCCESS)
+				continue;
+
+			this_cache[num_cache_leaves].cci = cci;
+			this_cache[num_cache_leaves].level = i + 1;
+			this_cache[num_cache_leaves].type = j;
+
+			cache_shared_cpu_map_setup(cpu,
+					&this_cache[num_cache_leaves]);
+			num_cache_leaves ++;
+		}
+	}
+
+	all_cpu_cache_info[cpu].cache_leaves = this_cache;
+	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
+
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return 0;
+}
+
+/* Add cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i, j;
+	struct cache_info *this_object;
+	int retval = 0;
+	cpumask_t oldmask;
+
+	if (all_cpu_cache_info[cpu].kobj.parent)
+		return 0;
+
+	oldmask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (unlikely(retval))
+		return retval;
+
+	retval = cpu_cache_sysfs_init(cpu);
+	set_cpus_allowed(current, oldmask);
+	if (unlikely(retval < 0))
+		return retval;
+
+	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
+	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
+	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
+	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
+		this_object = LEAF_KOBJECT_PTR(cpu,i);
+		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
+		kobject_set_name(&(this_object->kobj), "index%1lu", i);
+		this_object->kobj.ktype = &cache_ktype;
+		retval = kobject_register(&(this_object->kobj));
+		if (unlikely(retval)) {
+			for (j = 0; j < i; j++) {
+				kobject_unregister(
+					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
+			}
+			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+			cpu_cache_sysfs_exit(cpu);
+			break;
+		}
+	}
+	return retval;
+}
+
+/* Remove cache interface for CPU device */
+static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i;
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
+		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
+
+	if (all_cpu_cache_info[cpu].kobj.parent) {
+		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+		memset(&all_cpu_cache_info[cpu].kobj,
+			0,
+			sizeof(struct kobject));
+	}
+
+	cpu_cache_sysfs_exit(cpu);
+
+	return 0;
+}
+
+/*
+ * When a cpu is hot-plugged, do a check and initiate
+ * cache kobject if necessary
+ */
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cache_cpu_notifier =
+{
+	.notifier_call = cache_cpu_callback
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
+				(void *)(long)i);
+	}
+
+	register_cpu_notifier(&cache_cpu_notifier);
+
+	return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
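
The new show_number_of_sets() above derives the set count from the PAL
cache-config fields as size / associativity / line size, where
pcci_line_size holds the log2 of the line size (coherency_line_size is
reported as 1 << pcci_line_size).  A small worked example, using invented
PAL values rather than anything read from hardware:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pcci_cache_size = 256 * 1024;	/* 256 KB cache */
		unsigned int pcci_assoc = 8;			/* 8-way set associative */
		unsigned int pcci_line_size = 6;		/* log2(line size): 64-byte lines */
		unsigned int sets;

		/* 256 KB / (8 ways * 64 bytes per line) = 512 sets */
		sets = pcci_cache_size / pcci_assoc / (1 << pcci_line_size);

		printf("coherency_line_size=%u number_of_sets=%u\n",
		       1u << pcci_line_size, sets);
		return 0;
	}

For such a cache the new sysfs files would report size 256K,
ways_of_associativity 8, coherency_line_size 64 and number_of_sets 512,
under a per-leaf directory such as
/sys/devices/system/cpu/cpu0/cache/index0/ created by cache_add_dev().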

+ 9 - 9
arch/ia64/kernel/vmlinux.lds.S

@@ -70,6 +70,15 @@ SECTIONS
 	  __stop___ex_table = .;
 	}
 
+  /* MCA table */
+  . = ALIGN(16);
+  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+	{
+	  __start___mca_table = .;
+	  *(__mca_table)
+	  __stop___mca_table = .;
+	}
+
   /* Global data */
   _data = .;
 
@@ -130,15 +139,6 @@ SECTIONS
 	  __initcall_end = .;
 	}
 
-  /* MCA table */
-  . = ALIGN(16);
-  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
-	{
-	  __start___mca_table = .;
-	  *(__mca_table)
-	  __stop___mca_table = .;
-	}
-
   .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
 	{
 	  __start___vtop_patchlist = .;

+ 7 - 1
arch/ia64/mm/init.c

@@ -109,6 +109,7 @@ lazy_mmu_prot_update (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
+	unsigned long order;
 
 	if (!pte_exec(pte))
 		return;				/* not an executable page... */
@@ -119,7 +120,12 @@ lazy_mmu_prot_update (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + PAGE_SIZE);
+	if (PageCompound(page)) {
+		order = (unsigned long) (page[1].lru.prev);
+		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
+	}
+	else
+		flush_icache_range(addr, addr + PAGE_SIZE);
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
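
For the compound-page branch added above, the flushed length is
(1UL << order << PAGE_SHIFT), i.e. 2^order base pages starting at the head
page.  A quick arithmetic check (PAGE_SHIFT = 14, i.e. 16 KB pages, is only
an example value; ia64 kernels are also built with other page sizes):

	#include <stdio.h>

	#define EXAMPLE_PAGE_SHIFT 14UL	/* 16 KB pages, one common ia64 choice */

	int main(void)
	{
		unsigned long order;

		for (order = 0; order <= 4; order++)
			printf("order %lu -> flush %lu KB\n", order,
			       (1UL << order << EXAMPLE_PAGE_SHIFT) / 1024);
		return 0;
	}

This prints 16, 32, 64, 128 and 256 KB for orders 0 through 4, versus the
single PAGE_SIZE flush the old code always performed.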
 

+ 3 - 3
arch/ia64/mm/ioremap.c

@@ -21,12 +21,12 @@ __ioremap (unsigned long offset, unsigned long size)
 void __iomem *
 ioremap (unsigned long offset, unsigned long size)
 {
-	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
-		return __ioremap(offset, size);
-
 	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
 		return phys_to_virt(offset);
 
+	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+		return __ioremap(offset, size);
+
 	/*
 	 * Someday this should check ACPI resources so we
 	 * can do the right thing for hot-plugged regions.

+ 7 - 5
arch/ia64/mm/tlb.c

@@ -156,17 +156,19 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 		nbits = purge.max_bits;
 	start &= ~((1UL << nbits) - 1);
 
-# ifdef CONFIG_SMP
-	platform_global_tlb_purge(mm, start, end, nbits);
-# else
 	preempt_disable();
+#ifdef CONFIG_SMP
+	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+		platform_global_tlb_purge(mm, start, end, nbits);
+		preempt_enable();
+		return;
+	}
+#endif
 	do {
 		ia64_ptcl(start, (nbits<<2));
 		start += (1UL << nbits);
 	} while (start < end);
 	preempt_enable();
-# endif
-
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
 EXPORT_SYMBOL(flush_tlb_range);

+ 6 - 2
arch/ia64/sn/kernel/sn2/sn_hwperf.c

@@ -110,7 +110,11 @@ static int sn_hwperf_geoid_to_cnode(char *location)
 	if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
 		return -1;
 
-	for_each_node(cnode) {
+	/*
+	 * FIXME: replace with cleaner for_each_XXX macro which addresses
+	 * both compute and IO nodes once ACPI3.0 is available.
+	 */
+	for (cnode = 0; cnode < num_cnodes; cnode++) {
 		geoid = cnodeid_get_geoid(cnode);
 		module_id = geo_module(geoid);
 		this_rack = MODULE_GET_RACK(module_id);
@@ -605,7 +609,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 	op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
 
 	if (cpu != SN_HWPERF_ARG_ANY_CPU) {
-		if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
+		if (cpu >= NR_CPUS || !cpu_online(cpu)) {
 			r = -EINVAL;
 			goto out;
 		}

+ 0 - 1
arch/m68k/kernel/m68k_ksyms.c

@@ -79,4 +79,3 @@ EXPORT_SYMBOL(__down_failed_interruptible);
 EXPORT_SYMBOL(__down_failed_trylock);
 EXPORT_SYMBOL(__up_wakeup);
 
-EXPORT_SYMBOL(get_wchan);

+ 0 - 2
arch/m68knommu/kernel/m68k_ksyms.c

@@ -57,8 +57,6 @@ EXPORT_SYMBOL(__down_failed_interruptible);
 EXPORT_SYMBOL(__down_failed_trylock);
 EXPORT_SYMBOL(__up_wakeup);
 
-EXPORT_SYMBOL(get_wchan);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler...  (prototypes are not correct though, but that

+ 0 - 1
arch/mips/kernel/process.c

@@ -419,4 +419,3 @@ unsigned long get_wchan(struct task_struct *p)
 	return pc;
 }
 
-EXPORT_SYMBOL(get_wchan);

+ 1 - 5
arch/parisc/Kconfig

@@ -177,14 +177,10 @@ config ARCH_DISCONTIGMEM_DEFAULT
 	def_bool y
 	depends on ARCH_DISCONTIGMEM_ENABLE
 
+source "kernel/Kconfig.preempt"
 source "kernel/Kconfig.hz"
 source "mm/Kconfig"
 
-config PREEMPT
-	bool
-#	bool "Preemptible Kernel"
-	default n
-
 config COMPAT
 	def_bool y
 	depends on 64BIT

+ 74 - 86
arch/parisc/configs/712_defconfig

@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:04:34 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 19:59:51 2006
 #
 CONFIG_PARISC=y
 CONFIG_MMU=y
@@ -10,14 +10,11 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 
 #
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -32,17 +29,18 @@ CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 CONFIG_SYSCTL=y
 # CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_EMBEDDED is not set
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
+CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
@@ -51,8 +49,10 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
 
 #
 # Loadable module support
@@ -65,6 +65,23 @@ CONFIG_OBSOLETE_MODPARM=y
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_KMOD=y
 
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
 #
 # Processor type and features
 #
@@ -75,6 +92,10 @@ CONFIG_PA7100LC=y
 # CONFIG_PA8X00 is not set
 CONFIG_PA11=y
 # CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
@@ -86,7 +107,7 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
 # CONFIG_HPUX is not set
 
 #
@@ -130,6 +151,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -165,7 +187,12 @@ CONFIG_TCP_CONG_BIC=y
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
 # CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NETFILTER_XTABLES is not set
 
 #
 # IP: Netfilter Configuration
@@ -182,64 +209,6 @@ CONFIG_IP_NF_TFTP=m
 CONFIG_IP_NF_AMANDA=m
 # CONFIG_IP_NF_PPTP is not set
 CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-CONFIG_IP_NF_MATCH_SCTP=m
-# CONFIG_IP_NF_MATCH_DCCP is not set
-CONFIG_IP_NF_MATCH_COMMENT=m
-CONFIG_IP_NF_MATCH_CONNMARK=m
-CONFIG_IP_NF_MATCH_HASHLIMIT=m
-# CONFIG_IP_NF_MATCH_STRING is not set
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-# CONFIG_IP_NF_TARGET_NFQUEUE is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-# CONFIG_IP_NF_TARGET_TTL is not set
-CONFIG_IP_NF_TARGET_CONNMARK=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
 
 #
 # DCCP Configuration (EXPERIMENTAL)
@@ -250,6 +219,11 @@ CONFIG_IP_NF_ARP_MANGLE=m
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_VLAN_8021Q is not set
@@ -263,8 +237,11 @@ CONFIG_LLC2=m
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
 # CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
 
 #
 # Network testing
@@ -304,6 +281,7 @@ CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=m
 # CONFIG_PARPORT_PC_FIFO is not set
 # CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_NOT_PC=y
 CONFIG_PARPORT_GSC=y
 # CONFIG_PARPORT_1284 is not set
 
@@ -314,7 +292,6 @@ CONFIG_PARPORT_GSC=y
 #
 # Block devices
 #
-# CONFIG_BLK_DEV_FD is not set
 # CONFIG_PARIDE is not set
 # CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
@@ -325,14 +302,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=6144
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
 CONFIG_ATA_OVER_ETH=m
 
 #
@@ -376,6 +345,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
 #
 # SCSI low-level drivers
 #
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_SATA is not set
 # CONFIG_SCSI_PPA is not set
 # CONFIG_SCSI_IMM is not set
@@ -407,7 +377,6 @@ CONFIG_MD_RAID1=m
 #
 # IEEE 1394 (FireWire) support
 #
-# CONFIG_IEEE1394 is not set
 
 #
 # I2O device support
@@ -471,6 +440,7 @@ CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
 # CONFIG_SLIP is not set
 # CONFIG_SHAPER is not set
@@ -516,8 +486,8 @@ CONFIG_KEYBOARD_ATKBD_HP_KEYCODES=y
 # CONFIG_KEYBOARD_LKKBD is not set
 # CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_KEYBOARD_HIL_OLD=y
-# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_KEYBOARD_HIL=y
 CONFIG_INPUT_MOUSE=y
 CONFIG_MOUSE_PS2=y
 CONFIG_MOUSE_SERIAL=m
@@ -554,6 +524,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -598,12 +569,20 @@ CONFIG_MAX_RAW_DEVS=256
 #
 # TPM devices
 #
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
 
 #
 # I2C support
 #
 # CONFIG_I2C is not set
 
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
 #
 # Dallas's 1-wire bus
 #
@@ -640,7 +619,6 @@ CONFIG_FB=y
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
 # CONFIG_FB_MACMODES is not set
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
@@ -655,6 +633,7 @@ CONFIG_DUMMY_CONSOLE=y
 CONFIG_DUMMY_CONSOLE_COLUMNS=128
 CONFIG_DUMMY_CONSOLE_ROWS=48
 CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 CONFIG_STI_CONSOLE=y
 CONFIG_FONTS=y
 CONFIG_FONT_8x8=y
@@ -695,6 +674,8 @@ CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
 
@@ -723,6 +704,10 @@ CONFIG_SND_HARMONY=y
 # CONFIG_USB_ARCH_HAS_HCD is not set
 # CONFIG_USB_ARCH_HAS_OHCI is not set
 
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
 #
 # USB Gadget Support
 #
@@ -736,10 +721,9 @@ CONFIG_SND_HARMONY=y
 #
 # InfiniBand support
 #
-# CONFIG_INFINIBAND is not set
 
 #
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
 #
 
 #
@@ -765,6 +749,7 @@ CONFIG_XFS_EXPORT=y
 # CONFIG_XFS_SECURITY is not set
 # CONFIG_XFS_POSIX_ACL is not set
 # CONFIG_XFS_RT is not set
+# CONFIG_OCFS2_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
 CONFIG_INOTIFY=y
@@ -800,10 +785,10 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
 
 #
 # Miscellaneous filesystems
@@ -821,7 +806,6 @@ CONFIG_RAMFS=y
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_SYSV_FS is not set
 CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
 
 #
 # Network File Systems
@@ -917,18 +901,22 @@ CONFIG_OPROFILE=m
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
 # CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
 
 #
 # Security options

+ 2 - 2
arch/parisc/configs/a500_defconfig

@@ -1031,8 +1031,8 @@ CONFIG_NLS_CODEPAGE_850=m
 # CONFIG_NLS_ISO8859_8 is not set
 # CONFIG_NLS_CODEPAGE_1250 is not set
 # CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
 # CONFIG_NLS_ISO8859_2 is not set
 # CONFIG_NLS_ISO8859_3 is not set
 # CONFIG_NLS_ISO8859_4 is not set

+ 6 - 6
arch/parisc/configs/b180_defconfig

@@ -939,10 +939,10 @@ CONFIG_MSDOS_PARTITION=y
 #
 CONFIG_NLS=y
 CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=m
 # CONFIG_NLS_CODEPAGE_737 is not set
 # CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
+CONFIG_NLS_CODEPAGE_850=m
 # CONFIG_NLS_CODEPAGE_852 is not set
 # CONFIG_NLS_CODEPAGE_855 is not set
 # CONFIG_NLS_CODEPAGE_857 is not set
@@ -962,8 +962,8 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 # CONFIG_NLS_ISO8859_8 is not set
 # CONFIG_NLS_CODEPAGE_1250 is not set
 # CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
 # CONFIG_NLS_ISO8859_2 is not set
 # CONFIG_NLS_ISO8859_3 is not set
 # CONFIG_NLS_ISO8859_4 is not set
@@ -973,10 +973,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 # CONFIG_NLS_ISO8859_9 is not set
 # CONFIG_NLS_ISO8859_13 is not set
 # CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
 # CONFIG_NLS_KOI8_R is not set
 # CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
 
 #
 # Kernel hacking

+ 108 - 144
arch/parisc/configs/c3000_defconfig

@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:06:31 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 20:03:29 2006
 #
 CONFIG_PARISC=y
 CONFIG_MMU=y
@@ -10,14 +10,11 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 
 #
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -32,28 +29,30 @@ CONFIG_SYSVIPC=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 CONFIG_SYSCTL=y
 # CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EMBEDDED=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
+CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SHMEM=y
 CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
 
 #
 # Loadable module support
@@ -66,6 +65,23 @@ CONFIG_OBSOLETE_MODPARM=y
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_KMOD=y
 
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
 #
 # Processor type and features
 #
@@ -78,6 +94,10 @@ CONFIG_PA20=y
 CONFIG_PREFETCH=y
 # CONFIG_64BIT is not set
 # CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
@@ -89,7 +109,7 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_HPUX is not set
 
 #
@@ -135,6 +155,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -175,7 +196,12 @@ CONFIG_INET6_TUNNEL=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_DEBUG=y
+
+#
+# Core Netfilter Configuration
+#
 # CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NETFILTER_XTABLES is not set
 
 #
 # IP: Netfilter Configuration
@@ -192,87 +218,11 @@ CONFIG_IP_NF_TFTP=m
 CONFIG_IP_NF_AMANDA=m
 # CONFIG_IP_NF_PPTP is not set
 CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-# CONFIG_IP_NF_MATCH_SCTP is not set
-# CONFIG_IP_NF_MATCH_DCCP is not set
-# CONFIG_IP_NF_MATCH_COMMENT is not set
-# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-# CONFIG_IP_NF_MATCH_STRING is not set
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-# CONFIG_IP_NF_TARGET_NFQUEUE is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-# CONFIG_IP_NF_TARGET_TTL is not set
-# CONFIG_IP_NF_RAW is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
 
 #
 # IPv6: Netfilter Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP6_NF_QUEUE is not set
-CONFIG_IP6_NF_IPTABLES=m
-# CONFIG_IP6_NF_MATCH_LIMIT is not set
-CONFIG_IP6_NF_MATCH_MAC=m
-CONFIG_IP6_NF_MATCH_RT=m
-# CONFIG_IP6_NF_MATCH_OPTS is not set
-# CONFIG_IP6_NF_MATCH_FRAG is not set
-# CONFIG_IP6_NF_MATCH_HL is not set
-# CONFIG_IP6_NF_MATCH_MULTIPORT is not set
-CONFIG_IP6_NF_MATCH_OWNER=m
-# CONFIG_IP6_NF_MATCH_MARK is not set
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-# CONFIG_IP6_NF_MATCH_AHESP is not set
-CONFIG_IP6_NF_MATCH_LENGTH=m
-# CONFIG_IP6_NF_MATCH_EUI64 is not set
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_LOG=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-# CONFIG_IP6_NF_TARGET_NFQUEUE is not set
-CONFIG_IP6_NF_MANGLE=m
-# CONFIG_IP6_NF_TARGET_MARK is not set
-# CONFIG_IP6_NF_TARGET_HL is not set
-# CONFIG_IP6_NF_RAW is not set
 
 #
 # DCCP Configuration (EXPERIMENTAL)
@@ -283,6 +233,11 @@ CONFIG_IP6_NF_MANGLE=m
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_VLAN_8021Q is not set
@@ -295,8 +250,11 @@ CONFIG_IP6_NF_MANGLE=m
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
 # CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
 
 #
 # Network testing
@@ -341,7 +299,6 @@ CONFIG_FW_LOADER=y
 #
 # Block devices
 #
-# CONFIG_BLK_DEV_FD is not set
 # CONFIG_BLK_CPQ_DA is not set
 # CONFIG_BLK_CPQ_CISS_DA is not set
 # CONFIG_BLK_DEV_DAC960 is not set
@@ -355,14 +312,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
 # CONFIG_BLK_DEV_RAM is not set
 CONFIG_BLK_DEV_RAM_COUNT=16
 # CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
 # CONFIG_ATA_OVER_ETH is not set
 
 #
@@ -458,6 +407,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
 #
 # SCSI low-level drivers
 #
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -466,7 +416,6 @@ CONFIG_SCSI_ISCSI_ATTRS=m
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -476,18 +425,18 @@ CONFIG_SCSI_SATA=y
 CONFIG_SCSI_ATA_PIIX=m
 # CONFIG_SCSI_SATA_MV is not set
 # CONFIG_SCSI_SATA_NV is not set
-CONFIG_SCSI_SATA_PROMISE=m
+# CONFIG_SCSI_PDC_ADMA is not set
 # CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_PROMISE=m
 # CONFIG_SCSI_SATA_SX4 is not set
 CONFIG_SCSI_SATA_SIL=m
+# CONFIG_SCSI_SATA_SIL24 is not set
 # CONFIG_SCSI_SATA_SIS is not set
 # CONFIG_SCSI_SATA_ULI is not set
 CONFIG_SCSI_SATA_VIA=m
 # CONFIG_SCSI_SATA_VITESSE is not set
 CONFIG_SCSI_SATA_INTEL_COMBINED=y
-# CONFIG_SCSI_CPQFCTS is not set
 # CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_EATA_PIO is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
 # CONFIG_SCSI_INITIO is not set
@@ -496,18 +445,11 @@ CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
 CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
 # CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
 # CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA_FC is not set
 # CONFIG_SCSI_LPFC is not set
 # CONFIG_SCSI_DC395x is not set
 # CONFIG_SCSI_DC390T is not set
@@ -633,6 +575,7 @@ CONFIG_E1000=m
 # CONFIG_R8169 is not set
 # CONFIG_SIS190 is not set
 # CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
 # CONFIG_SK98LIN is not set
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=m
@@ -668,6 +611,7 @@ CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
 CONFIG_PPPOE=m
 # CONFIG_SLIP is not set
 # CONFIG_NET_FC is not set
@@ -744,6 +688,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -753,7 +698,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 #
 # Non-8250 serial port support
 #
-# CONFIG_SERIAL_MUX is not set
 # CONFIG_PDC_CONSOLE is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
@@ -788,12 +732,19 @@ CONFIG_MAX_RAW_DEVS=256
 # TPM devices
 #
 # CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
 
 #
 # I2C support
 #
 # CONFIG_I2C is not set
 
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
 #
 # Dallas's 1-wire bus
 #
@@ -830,7 +781,6 @@ CONFIG_FB=y
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_MODE_HELPERS is not set
 # CONFIG_FB_TILEBLITTING is not set
@@ -840,6 +790,7 @@ CONFIG_FB_SOFT_CURSOR=y
 # CONFIG_FB_ASILIANT is not set
 # CONFIG_FB_IMSTT is not set
 CONFIG_FB_STI=y
+# CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_NVIDIA is not set
 # CONFIG_FB_RIVA is not set
 # CONFIG_FB_MATROX is not set
@@ -853,10 +804,7 @@ CONFIG_FB_STI=y
 # CONFIG_FB_KYRO is not set
 # CONFIG_FB_3DFX is not set
 # CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_CYBLA is not set
 # CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
 
 #
@@ -866,6 +814,7 @@ CONFIG_DUMMY_CONSOLE=y
 CONFIG_DUMMY_CONSOLE_COLUMNS=160
 CONFIG_DUMMY_CONSOLE_ROWS=64
 CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 CONFIG_STI_CONSOLE=y
 # CONFIG_FONTS is not set
 CONFIG_FONT_8x8=y
@@ -898,23 +847,27 @@ CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
 
 #
 # Generic devices
 #
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_AC97_BUS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_VIRMIDI is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-CONFIG_SND_AC97_CODEC=y
-CONFIG_SND_AC97_BUS=y
 
 #
 # PCI devices
 #
+CONFIG_SND_AD1889=y
+# CONFIG_SND_AD1889_OPL3 is not set
 # CONFIG_SND_ALI5451 is not set
 # CONFIG_SND_ATIIXP is not set
 # CONFIG_SND_ATIIXP_MODEM is not set
@@ -923,39 +876,38 @@ CONFIG_SND_AC97_BUS=y
 # CONFIG_SND_AU8830 is not set
 # CONFIG_SND_AZT3328 is not set
 # CONFIG_SND_BT87X is not set
-# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
 # CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
 # CONFIG_SND_EMU10K1 is not set
 # CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_YMFPCI is not set
-CONFIG_SND_AD1889=y
-# CONFIG_SND_AD1889_OPL3 is not set
-# CONFIG_SND_CMIPCI is not set
 # CONFIG_SND_ENS1370 is not set
 # CONFIG_SND_ENS1371 is not set
 # CONFIG_SND_ES1938 is not set
 # CONFIG_SND_ES1968 is not set
-# CONFIG_SND_MAESTRO3 is not set
 # CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
 # CONFIG_SND_ICE1712 is not set
 # CONFIG_SND_ICE1724 is not set
 # CONFIG_SND_INTEL8X0 is not set
 # CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
 # CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
 # CONFIG_SND_VIA82XX is not set
 # CONFIG_SND_VIA82XX_MODEM is not set
 # CONFIG_SND_VX222 is not set
-# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_YMFPCI is not set
 
 #
 # USB devices
@@ -998,12 +950,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 # USB Device Class drivers
 #
 # CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
 # CONFIG_USB_ACM is not set
 CONFIG_USB_PRINTER=m
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
 #
 CONFIG_USB_STORAGE=m
 # CONFIG_USB_STORAGE_DEBUG is not set
@@ -1015,12 +970,15 @@ CONFIG_USB_STORAGE_USBAT=y
 CONFIG_USB_STORAGE_SDDR09=y
 CONFIG_USB_STORAGE_SDDR55=y
 CONFIG_USB_STORAGE_JUMPSHOT=y
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_LIBUSUAL is not set
 
 #
 # USB Input Devices
 #
 CONFIG_USB_HID=y
 CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
 # CONFIG_HID_FF is not set
 CONFIG_USB_HIDDEV=y
 # CONFIG_USB_AIPTEK is not set
@@ -1034,6 +992,7 @@ CONFIG_USB_HIDDEV=y
 # CONFIG_USB_YEALINK is not set
 # CONFIG_USB_XPAD is not set
 # CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
 # CONFIG_USB_KEYSPAN_REMOTE is not set
 # CONFIG_USB_APPLETOUCH is not set
 
@@ -1108,7 +1067,7 @@ CONFIG_USB_LEGOTOWER=m
 # CONFIG_INFINIBAND is not set
 
 #
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
 #
 
 #
@@ -1130,6 +1089,7 @@ CONFIG_XFS_EXPORT=y
 # CONFIG_XFS_SECURITY is not set
 # CONFIG_XFS_POSIX_ACL is not set
 # CONFIG_XFS_RT is not set
+# CONFIG_OCFS2_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
 CONFIG_INOTIFY=y
@@ -1164,10 +1124,10 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
 
 #
 # Miscellaneous filesystems
@@ -1225,10 +1185,10 @@ CONFIG_MSDOS_PARTITION=y
 #
 CONFIG_NLS=y
 CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=m
 # CONFIG_NLS_CODEPAGE_737 is not set
 # CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
+CONFIG_NLS_CODEPAGE_850=m
 # CONFIG_NLS_CODEPAGE_852 is not set
 # CONFIG_NLS_CODEPAGE_855 is not set
 # CONFIG_NLS_CODEPAGE_857 is not set
@@ -1248,8 +1208,8 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 # CONFIG_NLS_ISO8859_8 is not set
 # CONFIG_NLS_CODEPAGE_1250 is not set
 # CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
 # CONFIG_NLS_ISO8859_2 is not set
 # CONFIG_NLS_ISO8859_3 is not set
 # CONFIG_NLS_ISO8859_4 is not set
@@ -1259,10 +1219,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 # CONFIG_NLS_ISO8859_9 is not set
 # CONFIG_NLS_ISO8859_13 is not set
 # CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
 # CONFIG_NLS_KOI8_R is not set
 # CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
 
 #
 # Profiling support
@@ -1274,18 +1234,22 @@ CONFIG_OPROFILE=m
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
 # CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
 
 #
 # Security options

+ 110 - 55
arch/parisc/defconfig

@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:01:33 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 19:50:07 2006
 #
 CONFIG_PARISC=y
 CONFIG_MMU=y
@@ -15,7 +15,6 @@ CONFIG_GENERIC_IRQ_PROBE=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -30,17 +29,18 @@ CONFIG_SYSVIPC=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 CONFIG_SYSCTL=y
 # CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_KOBJECT_UEVENT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_EMBEDDED is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
+CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
@@ -49,14 +49,33 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
 
 #
 # Loadable module support
 #
 # CONFIG_MODULES is not set
 
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
 #
 # Processor type and features
 #
@@ -67,6 +86,10 @@ CONFIG_PA7000=y
 # CONFIG_PA8X00 is not set
 CONFIG_PA11=y
 # CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
@@ -78,7 +101,7 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
 # CONFIG_HPUX is not set
 
 #
@@ -132,6 +155,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -174,6 +198,11 @@ CONFIG_IPV6=y
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_VLAN_8021Q is not set
@@ -186,8 +215,11 @@ CONFIG_IPV6=y
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
 # CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
 
 #
 # Network testing
@@ -228,6 +260,7 @@ CONFIG_PARPORT_PC=y
 # CONFIG_PARPORT_SERIAL is not set
 # CONFIG_PARPORT_PC_FIFO is not set
 # CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_NOT_PC=y
 CONFIG_PARPORT_GSC=y
 # CONFIG_PARPORT_1284 is not set
 
@@ -254,14 +287,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
 # CONFIG_ATA_OVER_ETH is not set
 
 #
@@ -305,6 +330,7 @@ CONFIG_SCSI_SPI_ATTRS=y
 #
 # SCSI low-level drivers
 #
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -331,7 +357,7 @@ CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
 CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
 # CONFIG_SCSI_IPR is not set
 CONFIG_SCSI_ZALON=y
 CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
@@ -340,13 +366,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
 # CONFIG_SCSI_NCR53C8XX_PROFILE is not set
 # CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA_FC is not set
 # CONFIG_SCSI_LPFC is not set
 # CONFIG_SCSI_SIM710 is not set
 # CONFIG_SCSI_DC395x is not set
@@ -471,6 +491,7 @@ CONFIG_ACENIC=y
 # CONFIG_R8169 is not set
 # CONFIG_SIS190 is not set
 # CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
 # CONFIG_SK98LIN is not set
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=y
@@ -562,13 +583,13 @@ CONFIG_INPUT_KEYBOARD=y
 # CONFIG_KEYBOARD_LKKBD is not set
 # CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_KEYBOARD_HIL_OLD=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
 CONFIG_KEYBOARD_HIL=y
 CONFIG_INPUT_MOUSE=y
 # CONFIG_MOUSE_PS2 is not set
 # CONFIG_MOUSE_SERIAL is not set
 # CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_MOUSE_HIL is not set
+CONFIG_MOUSE_HIL=y
 CONFIG_INPUT_JOYSTICK=y
 # CONFIG_JOYSTICK_ANALOG is not set
 # CONFIG_JOYSTICK_A3D is not set
@@ -628,6 +649,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -675,12 +697,19 @@ CONFIG_GEN_RTC=y
 # TPM devices
 #
 # CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
 
 #
 # I2C support
 #
 # CONFIG_I2C is not set
 
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
 #
 # Dallas's 1-wire bus
 #
@@ -691,6 +720,7 @@ CONFIG_GEN_RTC=y
 #
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
 #
@@ -718,7 +748,6 @@ CONFIG_FB=y
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_MODE_HELPERS is not set
 # CONFIG_FB_TILEBLITTING is not set
@@ -728,6 +757,7 @@ CONFIG_FB_SOFT_CURSOR=y
 # CONFIG_FB_ASILIANT is not set
 # CONFIG_FB_IMSTT is not set
 CONFIG_FB_STI=y
+# CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_NVIDIA is not set
 # CONFIG_FB_RIVA is not set
 # CONFIG_FB_MATROX is not set
@@ -741,9 +771,7 @@ CONFIG_FB_STI=y
 # CONFIG_FB_KYRO is not set
 # CONFIG_FB_3DFX is not set
 # CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_CYBLA is not set
 # CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
 
 #
@@ -753,15 +781,28 @@ CONFIG_DUMMY_CONSOLE=y
 CONFIG_DUMMY_CONSOLE_COLUMNS=160
 CONFIG_DUMMY_CONSOLE_ROWS=64
 CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 CONFIG_STI_CONSOLE=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
 CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
 
 #
 # Logo configuration
 #
-# CONFIG_LOGO is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PARISC_CLUT224=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -781,23 +822,27 @@ CONFIG_SND_OSSEMUL=y
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
 
 #
 # Generic devices
 #
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_AC97_BUS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_VIRMIDI is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-CONFIG_SND_AC97_CODEC=y
-CONFIG_SND_AC97_BUS=y
 
 #
 # PCI devices
 #
+CONFIG_SND_AD1889=y
+# CONFIG_SND_AD1889_OPL3 is not set
 # CONFIG_SND_ALI5451 is not set
 # CONFIG_SND_ATIIXP is not set
 # CONFIG_SND_ATIIXP_MODEM is not set
@@ -806,39 +851,38 @@ CONFIG_SND_AC97_BUS=y
 # CONFIG_SND_AU8830 is not set
 # CONFIG_SND_AZT3328 is not set
 # CONFIG_SND_BT87X is not set
-# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
 # CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
 # CONFIG_SND_EMU10K1 is not set
 # CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_YMFPCI is not set
-CONFIG_SND_AD1889=y
-# CONFIG_SND_AD1889_OPL3 is not set
-# CONFIG_SND_CMIPCI is not set
 # CONFIG_SND_ENS1370 is not set
 # CONFIG_SND_ENS1371 is not set
 # CONFIG_SND_ES1938 is not set
 # CONFIG_SND_ES1968 is not set
-# CONFIG_SND_MAESTRO3 is not set
 # CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
 # CONFIG_SND_ICE1712 is not set
 # CONFIG_SND_ICE1724 is not set
 # CONFIG_SND_INTEL8X0 is not set
 # CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
 # CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
 # CONFIG_SND_VIA82XX is not set
 # CONFIG_SND_VIA82XX_MODEM is not set
 # CONFIG_SND_VX222 is not set
-# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_YMFPCI is not set
 
 #
 # USB devices
@@ -888,14 +932,18 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 # USB Device Class drivers
 #
 # CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
 # CONFIG_USB_ACM is not set
 # CONFIG_USB_PRINTER is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
 #
 # CONFIG_USB_STORAGE is not set
+# CONFIG_USB_LIBUSUAL is not set
 
 #
 # USB Input Devices
@@ -918,6 +966,7 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 # CONFIG_USB_YEALINK is not set
 # CONFIG_USB_XPAD is not set
 # CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
 # CONFIG_USB_KEYSPAN_REMOTE is not set
 # CONFIG_USB_APPLETOUCH is not set
 
@@ -994,7 +1043,7 @@ CONFIG_USB_MON=y
 # CONFIG_INFINIBAND is not set
 
 #
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
 #
 
 #
@@ -1011,6 +1060,7 @@ CONFIG_JBD=y
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
 # CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
 CONFIG_INOTIFY=y
@@ -1045,6 +1095,7 @@ CONFIG_TMPFS=y
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
 
 #
 # Miscellaneous filesystems
@@ -1151,18 +1202,22 @@ CONFIG_OPROFILE=y
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
 # CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
 
 #
 # Security options

+ 3 - 3
arch/parisc/kernel/cache.c

@@ -89,7 +89,7 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
 	    test_bit(PG_dcache_dirty, &page->flags)) {
 
-		flush_kernel_dcache_page(page_address(page));
+		flush_kernel_dcache_page(page);
 		clear_bit(PG_dcache_dirty, &page->flags);
 	}
 }
@@ -278,7 +278,7 @@ void flush_dcache_page(struct page *page)
 		return;
 	}
 
-	flush_kernel_dcache_page(page_address(page));
+	flush_kernel_dcache_page(page);
 
 	if (!mapping)
 		return;
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
-EXPORT_SYMBOL(flush_kernel_dcache_page);
+EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
 EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 

+ 39 - 6
arch/parisc/kernel/entry.S

@@ -563,10 +563,10 @@
 	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
 	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
 
-	/* Get rid of prot bits and convert to page addr for iitlbt */
+	/* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
 
 	depd		%r0,63,PAGE_SHIFT,\pte
-	extrd,u		\pte,56,32,\pte
+	extrd,s		\pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
@@ -584,7 +584,7 @@
 
 	/* Get rid of prot bits and convert to page addr for iitlba */
 
-	depi		0,31,12,\pte
+	depi		0,31,PAGE_SHIFT,\pte
 	extru		\pte,24,25,\pte
 
 	.endm
@@ -1014,14 +1014,21 @@ intr_restore:
 	nop
 	nop
 
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt	intr_restore
+#endif /* !CONFIG_PREEMPT */
+
 	.import schedule,code
 intr_do_resched:
-	/* Only do reschedule if we are returning to user space */
+	/* Only call schedule on return to userspace. If we're returning
+	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+	 * we jump back to intr_restore.
+	 */
 	LDREG	PT_IASQ0(%r16), %r20
-	CMPIB= 0,%r20,intr_restore /* backward */
+	CMPIB=	0, %r20, intr_do_preempt
 	nop
 	LDREG	PT_IASQ1(%r16), %r20
-	CMPIB= 0,%r20,intr_restore /* backward */
+	CMPIB=	0, %r20, intr_do_preempt
 	nop
 
 #ifdef CONFIG_64BIT
@@ -1037,6 +1044,32 @@ intr_do_resched:
 #endif
 	ldo	R%intr_check_sig(%r2), %r2
 
+	/* preempt the current task on returning to kernel
+	 * mode from an interrupt, iff need_resched is set,
+	 * and preempt_count is 0. otherwise, we continue on
+	 * our merry way back to the current running task.
+	 */
+#ifdef CONFIG_PREEMPT
+	.import preempt_schedule_irq,code
+intr_do_preempt:
+	rsm	PSW_SM_I, %r0		/* disable interrupts */
+
+	/* current_thread_info()->preempt_count */
+	mfctl	%cr30, %r1
+	LDREG	TI_PRE_COUNT(%r1), %r19
+	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
+	nop				/* prev insn branched backwards */
+
+	/* check if we interrupted a critical path */
+	LDREG	PT_PSW(%r16), %r20
+	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
+	nop
+
+	BL	preempt_schedule_irq, %r2
+	nop
+
+	b	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
 
 	.import do_signal,code
 intr_do_signal:
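
The intr_do_preempt path added above is easier to follow in C. The sketch below is a rough, hypothetical restatement of the same decision — TI_PRE_COUNT stands for thread_info->preempt_count and the PT_PSW bit test for "did the interrupted context have interrupts enabled" — not the actual PA-RISC implementation; the helper name and the test harness are invented for illustration only.

/*
 * Illustrative sketch of the intr_do_preempt decision above.
 * Stand-alone mock, not kernel code.
 */
#include <assert.h>
#include <stdbool.h>

/* May preempt_schedule_irq() be called when an interrupt returns to
 * kernel mode with need_resched already set? */
static bool may_preempt(unsigned int preempt_count, bool irqs_were_enabled)
{
	if (preempt_count != 0)		/* preemption currently disabled */
		return false;
	if (!irqs_were_enabled)		/* we interrupted a critical path */
		return false;
	return true;
}

int main(void)
{
	assert(may_preempt(0, true));	/* ordinary kernel code: preempt */
	assert(!may_preempt(1, true));	/* inside preempt_disable() */
	assert(!may_preempt(0, false));	/* irqs-off region: leave it alone */
	return 0;
}

Returns to user space are unaffected: as the comment in intr_do_resched says, they still go through schedule() as before.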

+ 2 - 2
arch/parisc/kernel/pacache.S

@@ -621,9 +621,9 @@ __clear_user_page_asm:
 
 	.procend
 
-	.export flush_kernel_dcache_page
+	.export flush_kernel_dcache_page_asm
 
-flush_kernel_dcache_page:
+flush_kernel_dcache_page_asm:
 	.proc
 	.callinfo NO_CALLS
 	.entry

+ 0 - 19
arch/parisc/kernel/parisc_ksyms.c

@@ -30,22 +30,7 @@
 #include <linux/syscalls.h>
 
 #include <linux/string.h>
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strpbrk);
 
 #include <asm/atomic.h>
@@ -82,16 +67,12 @@ EXPORT_SYMBOL($global$);
 #endif
 
 #include <asm/io.h>
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
 
 #include <asm/unistd.h>
-EXPORT_SYMBOL(sys_open);
 EXPORT_SYMBOL(sys_lseek);
-EXPORT_SYMBOL(sys_read);
 EXPORT_SYMBOL(sys_write);
 
 #include <asm/semaphore.h>

+ 2 - 3
arch/parisc/kernel/pdc_chassis.c

@@ -5,9 +5,8 @@
  *    Copyright (C) 2002-2004 Thibaut VARENE <varenet@parisc-linux.org>
  *
  *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
+ *    it under the terms of the GNU General Public License, version 2, as
+ *    published by the Free Software Foundation.
  *
  *    This program is distributed in the hope that it will be useful,
  *    but WITHOUT ANY WARRANTY; without even the implied warranty of

+ 1 - 1
arch/parisc/kernel/perf.c

@@ -805,7 +805,7 @@ static int perf_write_image(uint64_t *memaddr)
 		return -1;
 	}
 
-	runway = ioremap(cpu_device->hpa.start, 4096);
+	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
 
 	/* Merge intrigue bits into Runway STATUS 0 */
 	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;

+ 1 - 1
arch/parisc/kernel/syscall_table.S

@@ -287,7 +287,7 @@
 	ENTRY_SAME(chown)		/* 180 */
 	/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
 	ENTRY_COMP(setsockopt)
-	ENTRY_SAME(getsockopt)
+	ENTRY_COMP(getsockopt)
 	ENTRY_COMP(sendmsg)
 	ENTRY_COMP(recvmsg)
 	ENTRY_SAME(semop)		/* 185 */

+ 0 - 4
arch/parisc/lib/iomap.c

@@ -263,11 +263,7 @@ static const struct iomap_ops iomem_ops = {
 
 const struct iomap_ops *iomap_ops[8] = {
 	[0] = &ioport_ops,
-#ifdef CONFIG_DEBUG_IOREMAP
-	[6] = &iomem_ops,
-#else
 	[7] = &iomem_ops
-#endif
 };
 
 

+ 3 - 4
arch/parisc/mm/init.c

@@ -1013,9 +1013,9 @@ void flush_tlb_all(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#if 0
-	if (start < end)
-		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	if (start >= end)
+		return;
+	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 	for (; start < end; start += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(start));
 		init_page_count(virt_to_page(start));
@@ -1023,6 +1023,5 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 		num_physpages++;
 		totalram_pages++;
 	}
-#endif
 }
 #endif

+ 9 - 45
arch/parisc/mm/ioremap.c

@@ -72,7 +72,6 @@ remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
 	return 0;
 }
 
-#if USE_HPPA_IOREMAP
 static int 
 remap_area_pages(unsigned long address, unsigned long phys_addr,
 		 unsigned long size, unsigned long flags)
@@ -114,31 +113,6 @@ remap_area_pages(unsigned long address, unsigned long phys_addr,
 
 	return error;
 }
-#endif /* USE_HPPA_IOREMAP */
-
-#ifdef CONFIG_DEBUG_IOREMAP
-static unsigned long last = 0;
-
-void gsc_bad_addr(unsigned long addr)
-{
-	if (time_after(jiffies, last + HZ*10)) {
-		printk("gsc_foo() called with bad address 0x%lx\n", addr);
-		dump_stack();
-		last = jiffies;
-	}
-}
-EXPORT_SYMBOL(gsc_bad_addr);
-
-void __raw_bad_addr(const volatile void __iomem *addr)
-{
-	if (time_after(jiffies, last + HZ*10)) {
-		printk("__raw_foo() called with bad address 0x%p\n", addr);
-		dump_stack();
-		last = jiffies;
-	}
-}
-EXPORT_SYMBOL(__raw_bad_addr);
-#endif
 
 /*
  * Generic mapping function (not visible outside):
@@ -154,26 +128,19 @@ EXPORT_SYMBOL(__raw_bad_addr);
  */
 void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 {
-#if !(USE_HPPA_IOREMAP)
+	void *addr;
+	struct vm_struct *area;
+	unsigned long offset, last_addr;
 
+#ifdef CONFIG_EISA
 	unsigned long end = phys_addr + size - 1;
 	/* Support EISA addresses */
-	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
-			|| (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
-		phys_addr |= 0xfc000000;
+	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
+	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+		phys_addr |= F_EXTEND(0xfc000000);
 	}
-
-#ifdef CONFIG_DEBUG_IOREMAP
-	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
-#else
-	return (void __iomem *)phys_addr;
 #endif
 
-#else
-	void *addr;
-	struct vm_struct *area;
-	unsigned long offset, last_addr;
-
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
@@ -217,15 +184,12 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}
 
 	return (void __iomem *) (offset + (char *)addr);
-#endif
 }
+EXPORT_SYMBOL(__ioremap);
 
 void iounmap(void __iomem *addr)
 {
-#if !(USE_HPPA_IOREMAP)
-	return;
-#else
 	if (addr > high_memory)
 		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
-#endif
 }
+EXPORT_SYMBOL(iounmap);

+ 2 - 2
arch/powerpc/kernel/crash_dump.c

@@ -61,7 +61,7 @@ static int __init parse_elfcorehdr(char *p)
 	if (p)
 		elfcorehdr_addr = memparse(p, &p);
 
-	return 0;
+	return 1;
 }
 __setup("elfcorehdr=", parse_elfcorehdr);
 #endif
@@ -71,7 +71,7 @@ static int __init parse_savemaxmem(char *p)
 	if (p)
 		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
 
-	return 0;
+	return 1;
 }
 __setup("savemaxmem=", parse_savemaxmem);
 

+ 0 - 1
arch/powerpc/kernel/process.c

@@ -834,7 +834,6 @@ unsigned long get_wchan(struct task_struct *p)
 	} while (count++ < 16);
 	return 0;
 }
-EXPORT_SYMBOL(get_wchan);
 
 static int kstack_depth_to_print = 64;
 

+ 1 - 1
arch/powerpc/kernel/vdso32/sigtramp.S

@@ -261,7 +261,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt32)
 .Lcie_start:
 	.long 0			/* CIE ID */
 	.byte 1			/* Version number */
-	.string "zR"		/* NUL-terminated augmentation string */
+	.string "zRS"		/* NUL-terminated augmentation string */
 	.uleb128 4		/* Code alignment factor */
 	.sleb128 -4		/* Data alignment factor */
 	.byte 67		/* Return address register column, ap */

+ 1 - 1
arch/powerpc/kernel/vdso64/sigtramp.S

@@ -263,7 +263,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
 .Lcie_start:
 	.long 0			/* CIE ID */
 	.byte 1			/* Version number */
-	.string "zR"		/* NUL-terminated augmentation string */
+	.string "zRS"		/* NUL-terminated augmentation string */
 	.uleb128 4		/* Code alignment factor */
 	.sleb128 -8		/* Data alignment factor */
 	.byte 67		/* Return address register column, ap */

+ 3 - 3
arch/s390/kernel/smp.c

@@ -801,7 +801,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
          */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL|GFP_DMA, 
 					sizeof(void*) == 8 ? 1 : 0);
@@ -831,7 +831,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		if (cpu != smp_processor_id())
 			smp_create_idle(cpu);
 }
@@ -868,7 +868,7 @@ static int __init topology_init(void)
 	int cpu;
 	int ret;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
 		if (ret)
 			printk(KERN_WARNING "topology_init: register_cpu %d "

+ 1 - 1
arch/sh/kernel/cpu/init.c

@@ -30,7 +30,7 @@ static int x##_disabled __initdata = 0;		\
 static int __init x##_setup(char *opts)		\
 {						\
 	x##_disabled = 1;			\
-	return 0;				\
+	return 1;				\
 }						\
 __setup("no" __stringify(x), x##_setup);
 

+ 1 - 1
arch/sh/kernel/setup.c

@@ -401,7 +401,7 @@ static int __init topology_init(void)
 {
 	int cpu_id;
 
-	for_each_cpu(cpu_id)
+	for_each_possible_cpu(cpu_id)
 		register_cpu(&cpu[cpu_id], cpu_id, NULL);
 
 	return 0;

+ 3 - 0
arch/um/Kconfig

@@ -22,6 +22,9 @@ config SBUS
 config PCI
 	bool
 
+config PCMCIA
+	bool
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y

+ 3 - 4
arch/um/Makefile

@@ -20,7 +20,7 @@ core-y			+= $(ARCH_DIR)/kernel/		\
 
 # Have to precede the include because the included Makefiles reference them.
 SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \
-	module.h vm-flags.h elf.h ldt.h
+	module.h vm-flags.h elf.h host_ldt.h
 SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header))
 
 # XXX: The "os" symlink is only used by arch/um/include/os.h, which includes
@@ -129,7 +129,7 @@ CPPFLAGS_vmlinux.lds = -U$(SUBARCH) \
 	-DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
 	-DELF_FORMAT="$(ELF_FORMAT)" $(CPP_MODE-y) \
 	-DKERNEL_STACK_SIZE=$(STACK_SIZE) \
-	-DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap_fin.o
+	-DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap.o
 
 #The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
@@ -150,8 +150,7 @@ CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
 	$(ARCH_DIR)/include/user_constants.h \
 	$(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch
 
-MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
-	$(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
+MRPROPER_FILES += $(ARCH_SYMLINKS)
 
 archclean:
 	@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \

+ 1 - 1
arch/um/Makefile-x86_64

@@ -1,7 +1,7 @@
 # Copyright 2003 - 2004 Pathscale, Inc
 # Released under the GPL
 
-libs-y += arch/um/sys-x86_64/
+core-y += arch/um/sys-x86_64/
 START := 0x60000000
 
 #We #undef __x86_64__ for kernelspace, not for userspace where

+ 1 - 12
arch/um/drivers/daemon_kern.c

@@ -95,18 +95,7 @@ static struct transport daemon_transport = {
 static int register_daemon(void)
 {
 	register_transport(&daemon_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_daemon);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 4 - 4
arch/um/drivers/harddog_kern.c

@@ -104,7 +104,7 @@ static int harddog_release(struct inode *inode, struct file *file)
 
 extern int ping_watchdog(int fd);
 
-static ssize_t harddog_write(struct file *file, const char *data, size_t len,
+static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
 			     loff_t *ppos)
 {
 	/*
@@ -118,6 +118,7 @@ static ssize_t harddog_write(struct file *file, const char *data, size_t len,
 static int harddog_ioctl(struct inode *inode, struct file *file,
 			 unsigned int cmd, unsigned long arg)
 {
+	void __user *argp= (void __user *)arg;
 	static struct watchdog_info ident = {
 		WDIOC_SETTIMEOUT,
 		0,
@@ -127,13 +128,12 @@ static int harddog_ioctl(struct inode *inode, struct file *file,
 		default:
 			return -ENOTTY;
 		case WDIOC_GETSUPPORT:
-			if(copy_to_user((struct harddog_info *)arg, &ident,
-					sizeof(ident)))
+			if(copy_to_user(argp, &ident, sizeof(ident)))
 				return -EFAULT;
 			return 0;
 		case WDIOC_GETSTATUS:
 		case WDIOC_GETBOOTSTATUS:
-			return put_user(0,(int *)arg);
+			return put_user(0,(int __user *)argp);
 		case WDIOC_KEEPALIVE:
 			return(ping_watchdog(harddog_out_fd));
 	}

+ 5 - 5
arch/um/drivers/hostaudio_kern.c

@@ -67,8 +67,8 @@ MODULE_PARM_DESC(mixer, MIXER_HELP);
 
 /* /dev/dsp file operations */
 
-static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count, 
-			      loff_t *ppos)
+static ssize_t hostaudio_read(struct file *file, char __user *buffer,
+			      size_t count, loff_t *ppos)
 {
         struct hostaudio_state *state = file->private_data;
 	void *kbuf;
@@ -94,7 +94,7 @@ static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
 	return(err);
 }
 
-static ssize_t hostaudio_write(struct file *file, const char *buffer, 
+static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
 			       size_t count, loff_t *ppos)
 {
         struct hostaudio_state *state = file->private_data;
@@ -152,7 +152,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
 	case SNDCTL_DSP_CHANNELS:
 	case SNDCTL_DSP_SUBDIVIDE:
 	case SNDCTL_DSP_SETFRAGMENT:
-		if(get_user(data, (int *) arg))
+		if(get_user(data, (int __user *) arg))
 			return(-EFAULT);
 		break;
 	default:
@@ -168,7 +168,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
 	case SNDCTL_DSP_CHANNELS:
 	case SNDCTL_DSP_SUBDIVIDE:
 	case SNDCTL_DSP_SETFRAGMENT:
-		if(put_user(data, (int *) arg))
+		if(put_user(data, (int __user *) arg))
 			return(-EFAULT);
 		break;
 	default:

+ 1 - 12
arch/um/drivers/mcast_kern.c

@@ -124,18 +124,7 @@ static struct transport mcast_transport = {
 static int register_mcast(void)
 {
 	register_transport(&mcast_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_mcast);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 139 - 1
arch/um/drivers/mconsole_kern.c

@@ -20,6 +20,8 @@
 #include "linux/namei.h"
 #include "linux/proc_fs.h"
 #include "linux/syscalls.h"
+#include "linux/list.h"
+#include "linux/mm.h"
 #include "linux/console.h"
 #include "asm/irq.h"
 #include "asm/uaccess.h"
@@ -347,6 +349,142 @@ static struct mc_device *mconsole_find_dev(char *name)
 	return(NULL);
 }
 
+#define UNPLUGGED_PER_PAGE \
+	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
+
+struct unplugged_pages {
+	struct list_head list;
+	void *pages[UNPLUGGED_PER_PAGE];
+};
+
+static unsigned long long unplugged_pages_count = 0;
+static struct list_head unplugged_pages = LIST_HEAD_INIT(unplugged_pages);
+static int unplug_index = UNPLUGGED_PER_PAGE;
+
+static int mem_config(char *str)
+{
+	unsigned long long diff;
+	int err = -EINVAL, i, add;
+	char *ret;
+
+	if(str[0] != '=')
+		goto out;
+
+	str++;
+	if(str[0] == '-')
+		add = 0;
+	else if(str[0] == '+'){
+		add = 1;
+	}
+	else goto out;
+
+	str++;
+	diff = memparse(str, &ret);
+	if(*ret != '\0')
+		goto out;
+
+	diff /= PAGE_SIZE;
+
+	for(i = 0; i < diff; i++){
+		struct unplugged_pages *unplugged;
+		void *addr;
+
+		if(add){
+			if(list_empty(&unplugged_pages))
+				break;
+
+			unplugged = list_entry(unplugged_pages.next,
+					       struct unplugged_pages, list);
+			if(unplug_index > 0)
+				addr = unplugged->pages[--unplug_index];
+			else {
+				list_del(&unplugged->list);
+				addr = unplugged;
+				unplug_index = UNPLUGGED_PER_PAGE;
+			}
+
+			free_page((unsigned long) addr);
+			unplugged_pages_count--;
+		}
+		else {
+			struct page *page;
+
+			page = alloc_page(GFP_ATOMIC);
+			if(page == NULL)
+				break;
+
+			unplugged = page_address(page);
+			if(unplug_index == UNPLUGGED_PER_PAGE){
+				INIT_LIST_HEAD(&unplugged->list);
+				list_add(&unplugged->list, &unplugged_pages);
+				unplug_index = 0;
+			}
+			else {
+				struct list_head *entry = unplugged_pages.next;
+				addr = unplugged;
+
+				unplugged = list_entry(entry,
+						       struct unplugged_pages,
+						       list);
+				unplugged->pages[unplug_index++] = addr;
+				err = os_drop_memory(addr, PAGE_SIZE);
+				if(err)
+					printk("Failed to release memory - "
+					       "errno = %d\n", err);
+			}
+
+			unplugged_pages_count++;
+		}
+	}
+
+	err = 0;
+out:
+	return err;
+}
+
+static int mem_get_config(char *name, char *str, int size, char **error_out)
+{
+	char buf[sizeof("18446744073709551615")];
+	int len = 0;
+
+	sprintf(buf, "%ld", uml_physmem);
+	CONFIG_CHUNK(str, size, len, buf, 1);
+
+	return len;
+}
+
+static int mem_id(char **str, int *start_out, int *end_out)
+{
+	*start_out = 0;
+	*end_out = 0;
+
+	return 0;
+}
+
+static int mem_remove(int n)
+{
+	return -EBUSY;
+}
+
+static struct mc_device mem_mc = {
+	.name		= "mem",
+	.config		= mem_config,
+	.get_config	= mem_get_config,
+	.id		= mem_id,
+	.remove		= mem_remove,
+};
+
+static int mem_mc_init(void)
+{
+	if(can_drop_memory())
+		mconsole_register_dev(&mem_mc);
+	else printk("Can't release memory to the host - memory hotplug won't "
+		    "be supported\n");
+	return 0;
+}
+
+__initcall(mem_mc_init);
+
 #define CONFIG_BUF_SIZE 64
 
 static void mconsole_get_config(int (*get_config)(char *, char *, int,
@@ -478,7 +616,7 @@ static void console_write(struct console *console, const char *string,
 		return;
 
 	while(1){
-		n = min(len, ARRAY_SIZE(console_buf) - console_index);
+		n = min((size_t)len, ARRAY_SIZE(console_buf) - console_index);
 		strncpy(&console_buf[console_index], string, n);
 		console_index += n;
 		string += n;
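
The new "mem" mconsole device above accepts config strings of the form mem=+<size> or mem=-<size>, parsed with memparse (so K/M/G suffixes work) and rounded down to whole PAGE_SIZE units; on a 32-bit host with 4 KB pages, UNPLUGGED_PER_PAGE works out to (4096 - 8) / 4 = 1022 bookkeeping slots per tracking page. On the host this would presumably be driven through the management console, e.g. something like "uml_mconsole <umid> config mem=-64M" (exact command form assumed from the mc_device registration above). A user-space mock of just the size parsing, with a hypothetical stand-in for memparse, might look like this:

/*
 * Illustrative mock of the "mem=[+|-]<size>" parsing done by mem_config()
 * above.  parse_size() is a hypothetical stand-in for the kernel's
 * memparse(); this is a sketch, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define MOCK_PAGE_SIZE 4096ULL

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	const char *str = "=-64M";	/* as handed to mem_config() */
	char *end;
	unsigned long long pages;
	int add;

	if (str[0] != '=')
		return 1;
	str++;
	if (str[0] == '-')
		add = 0;
	else if (str[0] == '+')
		add = 1;
	else
		return 1;
	str++;

	pages = parse_size(str, &end) / MOCK_PAGE_SIZE;
	if (*end != '\0')
		return 1;
	printf("%s %llu pages\n", add ? "plug" : "unplug", pages);
	return 0;
}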

+ 1 - 12
arch/um/drivers/pcap_kern.c

@@ -106,18 +106,7 @@ static struct transport pcap_transport = {
 static int register_pcap(void)
 {
 	register_transport(&pcap_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_pcap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 1 - 12
arch/um/drivers/slip_kern.c

@@ -93,18 +93,7 @@ static struct transport slip_transport = {
 static int register_slip(void)
 {
 	register_transport(&slip_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_slip);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 2 - 13
arch/um/drivers/slirp_kern.c

@@ -77,7 +77,7 @@ static int slirp_setup(char *str, char **mac_out, void *data)
 	int i=0;
 
 	*init = ((struct slirp_init)
-		{ argw :		{ { "slirp", NULL  } } });
+		{ .argw = { { "slirp", NULL  } } });
 
 	str = split_if_spec(str, mac_out, NULL);
 
@@ -116,18 +116,7 @@ static struct transport slirp_transport = {
 static int register_slirp(void)
 {
 	register_transport(&slirp_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_slirp);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 1 - 1
arch/um/drivers/ubd_kern.c

@@ -891,7 +891,7 @@ int ubd_driver_init(void){
 			     SA_INTERRUPT, "ubd", ubd_dev);
 	if(err != 0)
 		printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
-	return(err);
+	return 0;
 }
 
 device_initcall(ubd_driver_init);

+ 5 - 1
arch/um/include/kern_util.h

@@ -116,7 +116,11 @@ extern void *get_current(void);
 extern struct task_struct *get_task(int pid, int require);
 extern void machine_halt(void);
 extern int is_syscall(unsigned long addr);
-extern void arch_switch(void);
+
+extern void arch_switch_to_tt(struct task_struct *from, struct task_struct *to);
+
+extern void arch_switch_to_skas(struct task_struct *from, struct task_struct *to);
+
 extern void free_irq(unsigned int, void *);
 extern int cpu(void);
 

+ 6 - 12
arch/um/include/line.h

@@ -58,23 +58,17 @@ struct line {
 };
 
 #define LINE_INIT(str, d) \
-	{ init_str :	str, \
-	  init_pri :	INIT_STATIC, \
-	  valid :	1, \
-	  throttled :	0, \
-	  lock :	SPIN_LOCK_UNLOCKED, \
-	  buffer :	NULL, \
-	  head :	NULL, \
-	  tail :	NULL, \
-	  sigio :	0, \
-	  driver :	d, \
-	  have_irq :	0 }
+	{ .init_str =	str, \
+	  .init_pri =	INIT_STATIC, \
+	  .valid =	1, \
+	  .lock =	SPIN_LOCK_UNLOCKED, \
+	  .driver =	d }
 
 struct lines {
 	int num;
 };
 
-#define LINES_INIT(n) {  num :		n }
+#define LINES_INIT(n) {  .num =	n }
 
 extern void line_close(struct tty_struct *tty, struct file * filp);
 extern int line_open(struct line *lines, struct tty_struct *tty);

+ 0 - 1
arch/um/include/mem_user.h

@@ -49,7 +49,6 @@ extern int iomem_size;
 extern unsigned long host_task_size;
 extern unsigned long task_size;
 
-extern void check_devanon(void);
 extern int init_mem_user(void);
 extern void setup_memory(void *entry);
 extern unsigned long find_iomem(char *driver, unsigned long *len_out);

+ 9 - 1
arch/um/include/os.h

@@ -13,6 +13,7 @@
 #include "kern_util.h"
 #include "skas/mm_id.h"
 #include "irq_user.h"
+#include "sysdep/tls.h"
 
 #define OS_TYPE_FILE 1 
 #define OS_TYPE_DIR 2 
@@ -172,6 +173,7 @@ extern int os_fchange_dir(int fd);
 extern void os_early_checks(void);
 extern int can_do_skas(void);
 extern void os_check_bugs(void);
+extern void check_host_supports_tls(int *supports_tls, int *tls_min);
 
 /* Make sure they are clear when running in TT mode. Required by
  * SEGV_MAYBE_FIXABLE */
@@ -205,6 +207,8 @@ extern int os_map_memory(void *virt, int fd, unsigned long long off,
 extern int os_protect_memory(void *addr, unsigned long len, 
 			     int r, int w, int x);
 extern int os_unmap_memory(void *addr, int len);
+extern int os_drop_memory(void *addr, int length);
+extern int can_drop_memory(void);
 extern void os_flush_stdout(void);
 
 /* tt.c
@@ -234,8 +238,12 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
 			     int stack_order);
 extern int helper_wait(int pid);
 
-/* umid.c */
 
+/* tls.c */
+extern int os_set_thread_area(user_desc_t *info, int pid);
+extern int os_get_thread_area(user_desc_t *info, int pid);
+
+/* umid.c */
 extern int umid_file_name(char *name, char *buf, int len);
 extern int set_umid(char *name);
 extern char *get_umid(void);

+ 3 - 2
arch/um/include/sysdep-i386/checksum.h

@@ -48,7 +48,8 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
  */
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
+					 unsigned char *dst,
 					 int len, int sum, int *err_ptr)
 {
 	if(copy_from_user(dst, src, len)){
@@ -192,7 +193,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  */
 #define HAVE_CSUM_COPY_USER
 static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
-						     unsigned char *dst,
+						     unsigned char __user *dst,
 						     int len, int sum, int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len)){

+ 5 - 0
arch/um/include/sysdep-i386/ptrace.h

@@ -14,7 +14,12 @@
 #define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
 #define MAX_REG_OFFSET (UM_FRAME_SIZE)
 
+#ifdef UML_CONFIG_PT_PROXY
 extern void update_debugregs(int seq);
+#else
+static inline void update_debugregs(int seq) {}
+#endif
+
 
 /* syscall emulation path in ptrace */
 

+ 32 - 0
arch/um/include/sysdep-i386/tls.h

@@ -0,0 +1,32 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
+ * may be named user_desc (but in 2.4 and in header matching its API was named
+ * modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+	unsigned int  entry_number;
+	unsigned int  base_addr;
+	unsigned int  limit;
+	unsigned int  seg_32bit:1;
+	unsigned int  contents:2;
+	unsigned int  read_exec_only:1;
+	unsigned int  limit_in_pages:1;
+	unsigned int  seg_not_present:1;
+	unsigned int  useable:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+#  include <asm/ldt.h>
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+
+#define GDT_ENTRY_TLS_MIN_I386 6
+#define GDT_ENTRY_TLS_MIN_X86_64 12
+
+#endif /* _SYSDEP_TLS_H */

+ 29 - 0
arch/um/include/sysdep-x86_64/tls.h

@@ -0,0 +1,29 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
+ * may be named user_desc (but in 2.4 and in header matching its API was named
+ * modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+	unsigned int  entry_number;
+	unsigned int  base_addr;
+	unsigned int  limit;
+	unsigned int  seg_32bit:1;
+	unsigned int  contents:2;
+	unsigned int  read_exec_only:1;
+	unsigned int  limit_in_pages:1;
+	unsigned int  seg_not_present:1;
+	unsigned int  useable:1;
+	unsigned int  lm:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+#  include <asm/ldt.h>
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+#endif /* _SYSDEP_TLS_H */

+ 4 - 1
arch/um/include/user_util.h

@@ -8,6 +8,9 @@
 
 #include "sysdep/ptrace.h"
 
+/* Copied from kernel.h */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
 #define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR))
 
 extern int mode_tt;
@@ -31,7 +34,7 @@ extern unsigned long uml_physmem;
 extern unsigned long uml_reserved;
 extern unsigned long end_vm;
 extern unsigned long start_vm;
-extern unsigned long highmem;
+extern unsigned long long highmem;
 
 extern char host_info[];
 

+ 3 - 13
arch/um/kernel/exec_kern.c

@@ -22,6 +22,7 @@
 
 void flush_thread(void)
 {
+	arch_flush_thread(&current->thread.arch);
 	CHOOSE_MODE(flush_thread_tt(), flush_thread_skas());
 }
 
@@ -58,14 +59,14 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
 	return(err);
 }
 
-long sys_execve(char *file, char __user *__user *argv,
+long sys_execve(char __user *file, char __user *__user *argv,
 		char __user *__user *env)
 {
 	long error;
 	char *filename;
 
 	lock_kernel();
-	filename = getname((char __user *) file);
+	filename = getname(file);
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename)) goto out;
 	error = execve1(filename, argv, env);
@@ -74,14 +75,3 @@ long sys_execve(char *file, char __user *__user *argv,
 	unlock_kernel();
 	return(error);
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 1 - 1
arch/um/kernel/mem.c

@@ -30,7 +30,7 @@ extern char __binary_start;
 unsigned long *empty_zero_page = NULL;
 unsigned long *empty_bad_page = NULL;
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long highmem;
+unsigned long long highmem;
 int kmalloc_ok = 0;
 
 static unsigned long brk_end;

+ 19 - 7
arch/um/kernel/process_kern.c

@@ -156,9 +156,25 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p, 
 		struct pt_regs *regs)
 {
+	int ret;
+
 	p->thread = (struct thread_struct) INIT_THREAD;
-	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr, 
-				clone_flags, sp, stack_top, p, regs));
+	ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+				clone_flags, sp, stack_top, p, regs);
+
+	if (ret || !current->thread.forking)
+		goto out;
+
+	clear_flushed_tls(p);
+
+	/*
+	 * Set a new TLS for the child thread?
+	 */
+	if (clone_flags & CLONE_SETTLS)
+		ret = arch_copy_tls(p);
+
+out:
+	return ret;
 }
 
 void initial_thread_cb(void (*proc)(void *), void *arg)
@@ -185,10 +201,6 @@ void default_idle(void)
 {
 	CHOOSE_MODE(uml_idle_timer(), (void) 0);
 
-	atomic_inc(&init_mm.mm_count);
-	current->mm = &init_mm;
-	current->active_mm = &init_mm;
-
 	while(1){
 		/* endless idle loop with no priority at all */
 
@@ -407,7 +419,7 @@ static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int
 	return strlen(buf);
 }
 
-static int proc_write_sysemu(struct file *file,const char *buf, unsigned long count,void *data)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
 {
 	char tmp[2];
 

+ 25 - 19
arch/um/kernel/ptrace.c

@@ -46,6 +46,7 @@ extern int poke_user(struct task_struct * child, long addr, long data);
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int i, ret;
+	unsigned long __user *p = (void __user *)(unsigned long)data;
 
 	switch (request) {
 		/* when I and D space are separate, these will need to be fixed. */
@@ -58,7 +59,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 		if (copied != sizeof(tmp))
 			break;
-		ret = put_user(tmp, (unsigned long __user *) data);
+		ret = put_user(tmp, p);
 		break;
 	}
 
@@ -136,15 +137,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 #ifdef PTRACE_GETREGS
 	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-	  	if (!access_ok(VERIFY_WRITE, (unsigned long *)data, 
-			       MAX_REG_OFFSET)) {
+		if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
 			ret = -EIO;
 			break;
 		}
 		for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
-			__put_user(getreg(child, i),
-				   (unsigned long __user *) data);
-			data += sizeof(long);
+			__put_user(getreg(child, i), p);
+			p++;
 		}
 		ret = 0;
 		break;
@@ -153,15 +152,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #ifdef PTRACE_SETREGS
 	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
 		unsigned long tmp = 0;
-	  	if (!access_ok(VERIFY_READ, (unsigned *)data, 
-			       MAX_REG_OFFSET)) {
+		if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
 			ret = -EIO;
 			break;
 		}
 		for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
-			__get_user(tmp, (unsigned long __user *) data);
+			__get_user(tmp, p);
 			putreg(child, i, tmp);
-			data += sizeof(long);
+			p++;
 		}
 		ret = 0;
 		break;
@@ -187,14 +185,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = set_fpxregs(data, child);
 		break;
 #endif
+	case PTRACE_GET_THREAD_AREA:
+		ret = ptrace_get_thread_area(child, addr,
+					     (struct user_desc __user *) data);
+		break;
+
+	case PTRACE_SET_THREAD_AREA:
+		ret = ptrace_set_thread_area(child, addr,
+					     (struct user_desc __user *) data);
+		break;
+
 	case PTRACE_FAULTINFO: {
-                /* Take the info from thread->arch->faultinfo,
-                 * but transfer max. sizeof(struct ptrace_faultinfo).
-                 * On i386, ptrace_faultinfo is smaller!
-                 */
-                ret = copy_to_user((unsigned long __user *) data,
-                                   &child->thread.arch.faultinfo,
-                                   sizeof(struct ptrace_faultinfo));
+		/* Take the info from thread->arch->faultinfo,
+		 * but transfer max. sizeof(struct ptrace_faultinfo).
+		 * On i386, ptrace_faultinfo is smaller!
+		 */
+		ret = copy_to_user(p, &child->thread.arch.faultinfo,
+				   sizeof(struct ptrace_faultinfo));
 		if(ret)
 			break;
 		break;
@@ -204,8 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_LDT: {
 		struct ptrace_ldt ldt;
 
-		if(copy_from_user(&ldt, (unsigned long __user *) data,
-				  sizeof(ldt))){
+		if(copy_from_user(&ldt, p, sizeof(ldt))){
 			ret = -EIO;
 			break;
 		}

+ 11 - 0
arch/um/kernel/skas/process_kern.c

@@ -35,6 +35,8 @@ void switch_to_skas(void *prev, void *next)
 	switch_threads(&from->thread.mode.skas.switch_buf,
 		       to->thread.mode.skas.switch_buf);
 
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	if(current->pid == 0)
 		switch_timers(1);
 }
@@ -89,10 +91,17 @@ void fork_handler(int sig)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	current->thread.prev_sched = NULL;
 
 /* Handle any immediate reschedules or signals */
 	interrupt_end();
+
 	userspace(&current->thread.regs.regs);
 }
 
@@ -109,6 +118,8 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
 		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
 
 		handler = fork_handler;
+
+		arch_copy_thread(&current->thread.arch, &p->thread.arch);
 	}
 	else {
 		init_thread_registers(&p->thread.regs.regs);

+ 2 - 2
arch/um/kernel/syscall_kern.c

@@ -104,7 +104,7 @@ long sys_pipe(unsigned long __user * fildes)
 }
 
 
-long sys_uname(struct old_utsname * name)
+long sys_uname(struct old_utsname __user * name)
 {
 	long err;
 	if (!name)
@@ -115,7 +115,7 @@ long sys_uname(struct old_utsname * name)
 	return err?-EFAULT:0;
 }
 
-long sys_olduname(struct oldold_utsname * name)
+long sys_olduname(struct oldold_utsname __user * name)
 {
 	long error;
 

+ 4 - 4
arch/um/kernel/trap_kern.c

@@ -198,7 +198,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
 		si.si_signo = SIGBUS;
 		si.si_errno = 0;
 		si.si_code = BUS_ADRERR;
-		si.si_addr = (void *)address;
+		si.si_addr = (void __user *)address;
                 current->thread.arch.faultinfo = fi;
 		force_sig_info(SIGBUS, &si, current);
 	} else if (err == -ENOMEM) {
@@ -207,7 +207,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
 	} else {
 		BUG_ON(err != -EFAULT);
 		si.si_signo = SIGSEGV;
-		si.si_addr = (void *) address;
+		si.si_addr = (void __user *) address;
                 current->thread.arch.faultinfo = fi;
 		force_sig_info(SIGSEGV, &si, current);
 	}
@@ -220,8 +220,8 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
 
 	si.si_signo = SIGSEGV;
 	si.si_code = SEGV_ACCERR;
-        si.si_addr = (void *) FAULT_ADDRESS(fi);
-        current->thread.arch.faultinfo = fi;
+	si.si_addr = (void __user *) FAULT_ADDRESS(fi);
+	current->thread.arch.faultinfo = fi;
 	force_sig_info(SIGSEGV, &si, current);
 }
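
Both this hunk and the syscall_kern.c one above are pure __user annotation fixes: they change nothing at run time, but they let sparse treat user-space pointers as a separate address space. A standalone illustration of what the annotation buys (the macro definition mirrors the kernel's compiler.h convention; everything else here is invented for the example):

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#include <stddef.h>
#include <string.h>

/* Stand-in for copy_from_user(): in this user-space sketch both "address
 * spaces" are really the same, so a memcpy is enough.  (Under sparse the
 * cast below would want __force in real kernel code.) */
static size_t pretend_copy_from_user(void *dst, const void __user *src, size_t n)
{
	memcpy(dst, (const void *)src, n);
	return 0;
}

int main(void)
{
	char kbuf[16];
	const char __user *ubuf = (const char __user *)"hello";

	pretend_copy_from_user(kbuf, ubuf, 6);
	/* With sparse, dereferencing ubuf directly here ("kbuf[0] = *ubuf;")
	 * would be flagged as a dereference of a noderef expression. */
	return kbuf[0] == 'h' ? 0 : 1;
}

gcc compiles this normally (the macro expands to nothing); running sparse over the same file, which defines __CHECKER__ itself, reports any mixing of the two pointer kinds.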
 

+ 8 - 2
arch/um/kernel/tt/process_kern.c

@@ -51,6 +51,13 @@ void switch_to_tt(void *prev, void *next)
 
 	c = 0;
 
+	/* Notice that here we "up" the semaphore on which "to" is waiting, and
+	 * below (the read) we sleep on our own semaphore (both semaphores are
+	 * implemented by switch_pipe). Thus, once the read returns, we have
+	 * resumed in "to", and can no longer use the value of "from" (which is
+	 * outdated) nor the value of "to" (since that was the task which took
+	 * the CPU from us, which we don't care about). */
+
 	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
 	if(err != sizeof(c))
 		panic("write of switch_pipe failed, err = %d", -err);
@@ -77,7 +84,7 @@ void switch_to_tt(void *prev, void *next)
 	change_sig(SIGALRM, alrm);
 	change_sig(SIGPROF, prof);
 
-	arch_switch();
+	arch_switch_to_tt(prev_sched, current);
 
 	flush_tlb_all();
 	local_irq_restore(flags);
@@ -141,7 +148,6 @@ static void new_thread_handler(int sig)
 	set_cmdline("(kernel thread)");
 
 	change_sig(SIGUSR1, 1);
-	change_sig(SIGVTALRM, 1);
 	change_sig(SIGPROF, 1);
 	local_irq_enable();
 	if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
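
The comment added to switch_to_tt() above describes the hand-off protocol: each task owns a switch_pipe that behaves like a binary semaphore, so switching away means writing a byte into the next task's pipe and then blocking on a read of your own. A self-contained user-space analogue of that protocol, with two processes standing in for UML tasks (everything here is illustrative, not UML code):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Each side owns one pipe acting as its private semaphore.  Handing off the
 * CPU means writing a byte into the other side's pipe (the "up") and then
 * blocking on a read of our own pipe (the "down"). */
static void hand_off(int other_wr, int own_rd, const char *name, int rounds)
{
	char c = 0;
	int i;

	for (i = 0; i < rounds; i++) {
		printf("%s: running (round %d)\n", name, i);
		fflush(stdout);
		if (write(other_wr, &c, 1) != 1)	/* wake the other side */
			exit(1);
		if (read(own_rd, &c, 1) != 1)		/* sleep until woken */
			exit(1);
	}
}

int main(void)
{
	int parent_pipe[2], child_pipe[2];
	char c = 0;
	pid_t pid;

	if (pipe(parent_pipe) || pipe(child_pipe))
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;

	if (pid == 0) {
		/* The child starts "asleep" until the parent's first write. */
		if (read(child_pipe[0], &c, 1) != 1)
			_exit(1);
		hand_off(parent_pipe[1], child_pipe[0], "child", 3);
		_exit(0);
	}

	hand_off(child_pipe[1], parent_pipe[0], "parent", 3);
	if (write(child_pipe[1], &c, 1) != 1)	/* release the child's last "down" */
		return 1;
	waitpid(pid, NULL, 0);
	return 0;
}

Running it, the two sides alternate strictly, which is the property switch_to_tt() relies on: at most one of them is ever past its read.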

+ 2 - 5
arch/um/os-Linux/Makefile

@@ -4,7 +4,7 @@
 #
 
 obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \
-	signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \
+	signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o tls.o \
 	user_syms.o util.o drivers/ sys-$(SUBARCH)/
 
 obj-$(CONFIG_MODE_SKAS) += skas/
@@ -12,12 +12,9 @@ obj-$(CONFIG_TTY_LOG) += tty_log.o
 user-objs-$(CONFIG_TTY_LOG) += tty_log.o
 
 USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \
-	process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \
+	process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o tls.o \
 	uaccess.o umid.o util.o
 
-elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
-CFLAGS_elf_aux.o += -I$(objtree)/arch/um
-
 CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
 
 HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \

+ 1 - 12
arch/um/os-Linux/drivers/ethertap_kern.c

@@ -102,18 +102,7 @@ static struct transport ethertap_transport = {
 static int register_ethertap(void)
 {
 	register_transport(&ethertap_transport);
-	return(1);
+	return 0;
 }
 
 __initcall(register_ethertap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
