Merge branch 'master'

Kumar Gala 19 years ago
parent
commit
1a02e59a29
100 changed files with 1301 additions and 389 deletions
  1. CREDITS  (+2 -4)
  2. Documentation/cpu-hotplug.txt  (+24 -3)
  3. Documentation/cpusets.txt  (+14 -27)
  4. Documentation/dvb/bt8xx.txt  (+5 -1)
  5. Documentation/feature-removal-schedule.txt  (+18 -0)
  6. Documentation/filesystems/ntfs.txt  (+6 -0)
  7. Documentation/filesystems/tmpfs.txt  (+21 -9)
  8. Documentation/filesystems/v9fs.txt  (+10 -6)
  9. Documentation/fujitsu/frv/kernel-ABI.txt  (+234 -0)
  10. Documentation/hwmon/w83627hf  (+4 -0)
  11. Documentation/kernel-parameters.txt  (+26 -0)
  12. Documentation/kprobes.txt  (+42 -39)
  13. Documentation/mips/AU1xxx_IDE.README  (+5 -1)
  14. Documentation/scsi/ChangeLog.megaraid_sas  (+23 -0)
  15. Documentation/sysctl/kernel.txt  (+10 -0)
  16. Documentation/video4linux/CARDLIST.saa7134  (+2 -2)
  17. Documentation/vm/page_migration  (+82 -36)
  18. Documentation/x86_64/boot-options.txt  (+4 -0)
  19. MAINTAINERS  (+19 -4)
  20. Makefile  (+5 -6)
  21. arch/alpha/kernel/irq.c  (+6 -1)
  22. arch/arm/Kconfig  (+3 -1)
  23. arch/arm/common/locomo.c  (+21 -0)
  24. arch/arm/common/rtctime.c  (+12 -4)
  25. arch/arm/kernel/asm-offsets.c  (+3 -1)
  26. arch/arm/kernel/calls.S  (+1 -1)
  27. arch/arm/kernel/compat.c  (+2 -0)
  28. arch/arm/kernel/compat.h  (+13 -0)
  29. arch/arm/kernel/entry-armv.S  (+1 -1)
  30. arch/arm/kernel/process.c  (+2 -1)
  31. arch/arm/kernel/ptrace.c  (+4 -10)
  32. arch/arm/kernel/setup.c  (+7 -3)
  33. arch/arm/kernel/smp.c  (+0 -1)
  34. arch/arm/kernel/sys_oabi-compat.c  (+30 -0)
  35. arch/arm/kernel/time.c  (+6 -4)
  36. arch/arm/kernel/traps.c  (+8 -0)
  37. arch/arm/lib/muldi3.S  (+2 -2)
  38. arch/arm/mach-at91rm9200/devices.c  (+3 -1)
  39. arch/arm/mach-at91rm9200/gpio.c  (+29 -2)
  40. arch/arm/mach-integrator/platsmp.c  (+15 -6)
  41. arch/arm/mach-iop3xx/iop321-setup.c  (+0 -1)
  42. arch/arm/mach-iop3xx/iop331-setup.c  (+0 -1)
  43. arch/arm/mach-ixp4xx/Kconfig  (+1 -3)
  44. arch/arm/mach-ixp4xx/common.c  (+13 -7)
  45. arch/arm/mach-ixp4xx/nas100d-power.c  (+3 -0)
  46. arch/arm/mach-ixp4xx/nas100d-setup.c  (+3 -0)
  47. arch/arm/mach-ixp4xx/nslu2-power.c  (+3 -0)
  48. arch/arm/mach-ixp4xx/nslu2-setup.c  (+11 -2)
  49. arch/arm/mach-realview/platsmp.c  (+15 -6)
  50. arch/arm/mach-s3c2410/devs.c  (+12 -0)
  51. arch/arm/mach-s3c2410/mach-h1940.c  (+27 -2)
  52. arch/arm/mach-s3c2410/s3c2400.h  (+31 -0)
  53. arch/arm/mach-versatile/pci.c  (+48 -45)
  54. arch/arm/mm/abort-ev6.S  (+1 -1)
  55. arch/arm/mm/cache-v6.S  (+4 -3)
  56. arch/arm/mm/flush.c  (+4 -2)
  57. arch/arm/mm/tlb-v6.S  (+1 -0)
  58. arch/arm/plat-omap/pm.c  (+0 -1)
  59. arch/arm/tools/mach-types  (+33 -2)
  60. arch/frv/Kconfig  (+4 -0)
  61. arch/frv/Makefile  (+1 -1)
  62. arch/frv/kernel/break.S  (+73 -4)
  63. arch/frv/kernel/entry-table.S  (+34 -5)
  64. arch/frv/kernel/entry.S  (+79 -12)
  65. arch/frv/kernel/head.S  (+3 -0)
  66. arch/frv/kernel/irq.c  (+3 -38)
  67. arch/frv/mm/kmap.c  (+0 -9)
  68. arch/h8300/Kconfig  (+4 -0)
  69. arch/h8300/Kconfig.cpu  (+1 -1)
  70. arch/h8300/Kconfig.debug  (+1 -1)
  71. arch/h8300/defconfig  (+1 -1)
  72. arch/h8300/kernel/process.c  (+3 -0)
  73. arch/i386/Kconfig  (+2 -1)
  74. arch/i386/boot/.gitignore  (+3 -0)
  75. arch/i386/boot/tools/.gitignore  (+1 -0)
  76. arch/i386/kernel/.gitignore  (+1 -0)
  77. arch/i386/kernel/Makefile  (+2 -2)
  78. arch/i386/kernel/acpi/Makefile  (+1 -1)
  79. arch/i386/kernel/acpi/boot.c  (+0 -3)
  80. arch/i386/kernel/acpi/earlyquirk.c  (+8 -0)
  81. arch/i386/kernel/apic.c  (+4 -2)
  82. arch/i386/kernel/cpu/common.c  (+30 -6)
  83. arch/i386/kernel/cpu/transmeta.c  (+1 -0)
  84. arch/i386/kernel/efi.c  (+9 -5)
  85. arch/i386/kernel/head.S  (+4 -2)
  86. arch/i386/kernel/i386_ksyms.c  (+0 -2)
  87. arch/i386/kernel/io_apic.c  (+22 -3)
  88. arch/i386/kernel/kprobes.c  (+14 -2)
  89. arch/i386/kernel/machine_kexec.c  (+7 -7)
  90. arch/i386/kernel/microcode.c  (+9 -6)
  91. arch/i386/kernel/mpparse.c  (+9 -3)
  92. arch/i386/kernel/nmi.c  (+2 -2)
  93. arch/i386/kernel/setup.c  (+4 -0)
  94. arch/i386/kernel/smpboot.c  (+0 -10)
  95. arch/i386/kernel/syscall_table.S  (+1 -1)
  96. arch/i386/kernel/time.c  (+2 -2)
  97. arch/i386/kernel/timers/timer_tsc.c  (+5 -0)
  98. arch/i386/kernel/topology.c  (+3 -3)
  99. arch/i386/kernel/vsyscall-sysenter.S  (+15 -0)
  100. arch/i386/mach-default/Makefile  (+1 -1)

+ 2 - 4
CREDITS

@@ -3643,11 +3643,9 @@ S: Cambridge. CB1 7EG
 S: England
 
 N: Chris Wright
-E: chrisw@osdl.org
+E: chrisw@sous-sol.org
 D: hacking on LSM framework and security modules.
-S: c/o OSDL
-S: 12725 SW Millikan Way, Suite 400
-S: Beaverton, OR 97005
+S: Portland, OR
 S: USA
 
 N: Michal Wronski

+ 24 - 3
Documentation/cpu-hotplug.txt

@@ -11,6 +11,8 @@
 			Joel Schopp <jschopp@austin.ibm.com>
 		ia64/x86_64:
 			Ashok Raj <ashok.raj@intel.com>
+		s390:
+			Heiko Carstens <heiko.carstens@de.ibm.com>
 
 Authors: Ashok Raj <ashok.raj@intel.com>
 Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>,
@@ -44,9 +46,28 @@ maxcpus=n    Restrict boot time cpus to n. Say if you have 4 cpus, using
              maxcpus=2 will only boot 2. You can choose to bring the
              other cpus later online, read FAQ's for more info.
 
-additional_cpus=n	[x86_64 only] use this to limit hotpluggable cpus.
-                        This option sets
-			cpu_possible_map = cpu_present_map + additional_cpus
+additional_cpus*=n	Use this to limit hotpluggable cpus. This option sets
+  			cpu_possible_map = cpu_present_map + additional_cpus
+
+(*) Option valid only for following architectures
+- x86_64, ia64, s390
+
+ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
+to determine the number of potentially hot-pluggable cpus. The implementation
+should only rely on this to count the #of cpus, but *MUST* not rely on the
+apicid values in those tables for disabled apics. In the event BIOS doesnt
+mark such hot-pluggable cpus as disabled entries, one could use this
+parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
+
+s390 uses the number of cpus it detects at IPL time to also the number of bits
+in cpu_possible_map. If it is desired to add additional cpus at a later time
+the number should be specified using this option or the possible_cpus option.
+
+possible_cpus=n		[s390 only] use this to set hotpluggable cpus.
+			This option sets possible_cpus bits in
+			cpu_possible_map. Thus keeping the numbers of bits set
+			constant even if the machine gets rebooted.
+			This option overrides additional_cpus.
 
 CPU maps and such
 -----------------

+ 14 - 27
Documentation/cpusets.txt

@@ -4,8 +4,9 @@
 Copyright (C) 2004 BULL SA.
 Written by Simon.Derr@bull.net
 
-Portions Copyright (c) 2004 Silicon Graphics, Inc.
+Portions Copyright (c) 2004-2006 Silicon Graphics, Inc.
 Modified by Paul Jackson <pj@sgi.com>
+Modified by Christoph Lameter <clameter@sgi.com>
 
 CONTENTS:
 =========
@@ -90,7 +91,8 @@ This can be especially valuable on:
 
 These subsets, or "soft partitions" must be able to be dynamically
 adjusted, as the job mix changes, without impacting other concurrently
-executing jobs.
+executing jobs. The location of the running jobs pages may also be moved
+when the memory locations are changed.
 
 The kernel cpuset patch provides the minimum essential kernel
 mechanisms required to efficiently implement such subsets.  It
@@ -102,8 +104,8 @@ memory allocator code.
 1.3 How are cpusets implemented ?
 ---------------------------------
 
-Cpusets provide a Linux kernel (2.6.7 and above) mechanism to constrain
-which CPUs and Memory Nodes are used by a process or set of processes.
+Cpusets provide a Linux kernel mechanism to constrain which CPUs and
+Memory Nodes are used by a process or set of processes.
 
 The Linux kernel already has a pair of mechanisms to specify on which
 CPUs a task may be scheduled (sched_setaffinity) and on which Memory
@@ -371,22 +373,17 @@ cpusets memory placement policy 'mems' subsequently changes.
 If the cpuset flag file 'memory_migrate' is set true, then when
 tasks are attached to that cpuset, any pages that task had
 allocated to it on nodes in its previous cpuset are migrated
-to the tasks new cpuset.  Depending on the implementation,
-this migration may either be done by swapping the page out,
-so that the next time the page is referenced, it will be paged
-into the tasks new cpuset, usually on the node where it was
-referenced, or this migration may be done by directly copying
-the pages from the tasks previous cpuset to the new cpuset,
-where possible to the same node, relative to the new cpuset,
-as the node that held the page, relative to the old cpuset.
+to the tasks new cpuset. The relative placement of the page within
+the cpuset is preserved during these migration operations if possible.
+For example if the page was on the second valid node of the prior cpuset
+then the page will be placed on the second valid node of the new cpuset.
+
 Also if 'memory_migrate' is set true, then if that cpusets
 'mems' file is modified, pages allocated to tasks in that
 cpuset, that were on nodes in the previous setting of 'mems',
-will be moved to nodes in the new setting of 'mems.'  Again,
-depending on the implementation, this might be done by swapping,
-or by direct copying.  In either case, pages that were not in
-the tasks prior cpuset, or in the cpusets prior 'mems' setting,
-will not be moved.
+will be moved to nodes in the new setting of 'mems.'
+Pages that were not in the tasks prior cpuset, or in the cpusets
+prior 'mems' setting, will not be moved.
 
 There is an exception to the above.  If hotplug functionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
@@ -434,16 +431,6 @@ and then start a subshell 'sh' in that cpuset:
   # The next line should display '/Charlie'
   cat /proc/self/cpuset
 
-In the case that a change of cpuset includes wanting to move already
-allocated memory pages, consider further the work of IWAMOTO
-Toshihiro <iwamoto@valinux.co.jp> for page remapping and memory
-hotremoval, which can be found at:
-
-  http://people.valinux.co.jp/~iwamoto/mh.html
-
-The integration of cpusets with such memory migration is not yet
-available.
-
 In the future, a C library interface to cpusets will likely be
 available.  For now, the only way to query or modify cpusets is
 via the cpuset file system, using the various cd, mkdir, echo, cat,

+ 5 - 1
Documentation/dvb/bt8xx.txt

@@ -111,4 +111,8 @@ source:  linux/Documentation/video4linux/CARDLIST.bttv
 If you have problems with this please do ask on the mailing list.
 
 --
-Authors: Richard Walker, Jamie Honan, Michael Hunold, Manu Abraham
+Authors: Richard Walker,
+	 Jamie Honan,
+	 Michael Hunold,
+	 Manu Abraham,
+	 Michael Krufky

+ 18 - 0
Documentation/feature-removal-schedule.txt

@@ -171,3 +171,21 @@ Why:	The ISA interface is faster and should be always available. The I2C
 	probing is also known to cause trouble in at least one case (see
 	bug #5889.)
 Who:	Jean Delvare <khali@linux-fr.org>
+
+---------------------------
+
+What:	mount/umount uevents
+When:	February 2007
+Why:	These events are not correct, and do not properly let userspace know
+	when a file system has been mounted or unmounted.  Userspace should
+	poll the /proc/mounts file instead to detect this properly.
+Who:	Greg Kroah-Hartman <gregkh@suse.de>
+
+---------------------------
+
+What:	Support for NEC DDB5074 and DDB5476 evaluation boards.
+When:	June 2006
+Why:	Board specific code doesn't build anymore since ~2.6.0 and no
+	users have complained indicating there is no more need for these
+	boards.  This should really be considered a last call.
+Who:	Ralf Baechle <ralf@linux-mips.org>

+ 6 - 0
Documentation/filesystems/ntfs.txt

@@ -457,6 +457,12 @@ ChangeLog
 
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.26:
+	- Implement support for sector sizes above 512 bytes (up to the maximum
+	  supported by NTFS which is 4096 bytes).
+	- Enhance support for NTFS volumes which were supported by Windows but
+	  not by Linux due to invalid attribute list attribute flags.
+	- A few minor updates and bug fixes.
 2.1.25:
 	- Write support is now extended with write(2) being able to both
 	  overwrite existing file data and to extend files.  Also, if a write

+ 21 - 9
Documentation/filesystems/tmpfs.txt

@@ -79,15 +79,27 @@ that instance in a system with many cpus making intensive use of it.
 
 
 tmpfs has a mount option to set the NUMA memory allocation policy for
-all files in that instance:
-mpol=interleave		prefers to allocate memory from each node in turn
-mpol=default		prefers to allocate memory from the local node
-mpol=bind		prefers to allocate from mpol_nodelist
-mpol=preferred		prefers to allocate from first node in mpol_nodelist
+all files in that instance (if CONFIG_NUMA is enabled) - which can be
+adjusted on the fly via 'mount -o remount ...'
 
-The following mount option is used in conjunction with mpol=interleave,
-mpol=bind or mpol=preferred:
-mpol_nodelist:	nodelist suitable for parsing with nodelist_parse.
+mpol=default             prefers to allocate memory from the local node
+mpol=prefer:Node         prefers to allocate memory from the given Node
+mpol=bind:NodeList       allocates memory only from nodes in NodeList
+mpol=interleave          prefers to allocate from each node in turn
+mpol=interleave:NodeList allocates from each node of NodeList in turn
+
+NodeList format is a comma-separated list of decimal numbers and ranges,
+a range being two hyphen-separated decimal numbers, the smallest and
+largest node numbers in the range.  For example, mpol=bind:0-3,5,7,9-15
+
+Note that trying to mount a tmpfs with an mpol option will fail if the
+running kernel does not support NUMA; and will fail if its nodelist
+specifies a node >= MAX_NUMNODES.  If your system relies on that tmpfs
+being mounted, but from time to time runs a kernel built without NUMA
+capability (perhaps a safe recovery kernel), or configured to support
+fewer nodes, then it is advisable to omit the mpol option from automatic
+mount options.  It can be added later, when the tmpfs is already mounted
+on MountPoint, by 'mount -o remount,mpol=Policy:NodeList MountPoint'.
 
 
 To specify the initial root directory you can use the following mount
@@ -109,4 +121,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
 Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
-   Hugh Dickins <hugh@veritas.com>, 13 March 2005
+   Hugh Dickins <hugh@veritas.com>, 19 February 2006

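As an illustrative aside to the tmpfs hunk above (not part of the patch), the reworked mpol= mount option can also be exercised from C through plain mount(2). The mount point /mnt/tmp, the size=1g option and the 0-3 node range below are made-up values; the equivalent shell form is the 'mount -o remount,mpol=Policy:NodeList MountPoint' command quoted in the text, and a non-NUMA kernel will reject the mpol option as the text explains.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Interleave new allocations across nodes 0-3; needs root and a
	 * CONFIG_NUMA kernel, otherwise the mpol option is rejected. */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=1g,mpol=interleave:0-3") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}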
+ 10 - 6
Documentation/filesystems/v9fs.txt

@@ -57,8 +57,6 @@ OPTIONS
 
   port=n	port to connect to on the remote server
 
-  timeout=n	request timeouts (in ms) (default 60000ms)
-
   noextend	force legacy mode (no 9P2000.u semantics)
 
   uid		attempt to mount as a particular uid
@@ -74,10 +72,16 @@ OPTIONS
 RESOURCES
 =========
 
-The Linux version of the 9P server, along with some client-side utilities
-can be found at http://v9fs.sf.net (along with a CVS repository of the
-development branch of this module).  There are user and developer mailing
-lists here, as well as a bug-tracker.
+The Linux version of the 9P server is now maintained under the npfs project
+on sourceforge (http://sourceforge.net/projects/npfs).
+
+There are user and developer mailing lists available through the v9fs project
+on sourceforge (http://sourceforge.net/projects/v9fs).
+
+News and other information is maintained on SWiK (http://swik.net/v9fs).
+
+Bug reports may be issued through the kernel.org bugzilla 
+(http://bugzilla.kernel.org)
 
 For more information on the Plan 9 Operating System check out
 http://plan9.bell-labs.com/plan9

+ 234 - 0
Documentation/fujitsu/frv/kernel-ABI.txt

@@ -0,0 +1,234 @@
+				 =================================
+				 INTERNAL KERNEL ABI FOR FR-V ARCH
+				 =================================
+
+The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers
+are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs
+no-MMU.
+
+This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and
+most of them do not have any scratch registers, thus requiring at least one general purpose
+register to be clobbered in such an event. Also, within the kernel core, it is possible to simply
+jump or call directly between functions using a relative offset. This cannot be extended to modules
+for the displacement is likely to be too far. Thus in modules the address of a function to call
+must be calculated in a register and then used, requiring two extra instructions.
+
+This document has the following sections:
+
+ (*) System call register ABI
+ (*) CPU operating modes
+ (*) Internal kernel-mode register ABI
+ (*) Internal debug-mode register ABI
+ (*) Virtual interrupt handling
+
+
+========================
+SYSTEM CALL REGISTER ABI
+========================
+
+When a system call is made, the following registers are effective:
+
+	REGISTERS	CALL			RETURN
+	===============	=======================	=======================
+	GR7		System call number	Preserved
+	GR8		Syscall arg #1		Return value
+	GR9-GR13	Syscall arg #2-6	Preserved
+
+
+===================
+CPU OPERATING MODES
+===================
+
+The FR-V CPU has three basic operating modes. In order of increasing capability:
+
+  (1) User mode.
+
+      Basic userspace running mode.
+
+  (2) Kernel mode.
+
+      Normal kernel mode. There are many additional control registers available that may be
+      accessed in this mode, in addition to all the stuff available to user mode. This has two
+      submodes:
+
+      (a) Exceptions enabled (PSR.T == 1).
+
+      	  Exceptions will invoke the appropriate normal kernel mode handler. On entry to the
+      	  handler, the PSR.T bit will be cleared.
+
+      (b) Exceptions disabled (PSR.T == 0).
+
+      	  No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to
+      	  halt unless the CPU is told to jump into debug mode instead.
+
+  (3) Debug mode.
+
+      No exceptions may happen in this mode. Memory protection and management exceptions will be
+      flagged for later consideration, but the exception handler won't be invoked. Debugging traps
+      such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by
+      debugging events obtained from the other two modes.
+
+      All kernel mode registers may be accessed, plus a few extra debugging specific registers.
+
+
+=================================
+INTERNAL KERNEL-MODE REGISTER ABI
+=================================
+
+There are a number of permanent register assignments that are set up by entry.S in the exception
+prologue. Note that there is a complete set of exception prologues for each of user->kernel
+transition and kernel->kernel transition. There are also user->debug and kernel->debug mode
+transition prologues.
+
+
+	REGISTER	FLAVOUR	USE
+	===============	=======	====================================================
+	GR1			Supervisor stack pointer
+	GR15			Current thread info pointer
+	GR16			GP-Rel base register for small data
+	GR28			Current exception frame pointer (__frame)
+	GR29			Current task pointer (current)
+	GR30			Destroyed by kernel mode entry
+	GR31		NOMMU	Destroyed by debug mode entry
+	GR31		MMU	Destroyed by TLB miss kernel mode entry
+	CCR.ICC2		Virtual interrupt disablement tracking
+	CCCR.CC3		Cleared by exception prologue (atomic op emulation)
+	SCR0		MMU	See mmu-layout.txt.
+	SCR1		MMU	See mmu-layout.txt.
+	SCR2		MMU	Save for EAR0 (destroyed by icache insns in debug mode)
+	SCR3		MMU	Save for GR31 during debug exceptions
+	DAMR/IAMR	NOMMU	Fixed memory protection layout.
+	DAMR/IAMR	MMU	See mmu-layout.txt.
+
+
+Certain registers are also used or modified across function calls:
+
+	REGISTER	CALL				RETURN
+	===============	===============================	===============================
+	GR0		Fixed Zero			-
+	GR2		Function call frame pointer
+	GR3		Special				Preserved
+	GR3-GR7		-				Clobbered
+	GR8		Function call arg #1		Return value (or clobbered)
+	GR9		Function call arg #2		Return value MSW (or clobbered)
+	GR10-GR13	Function call arg #3-#6		Clobbered
+	GR14		-				Clobbered
+	GR15-GR16	Special				Preserved
+	GR17-GR27	-				Preserved
+	GR28-GR31	Special				Only accessed explicitly
+	LR		Return address after CALL	Clobbered
+	CCR/CCCR	-				Mostly Clobbered
+
+
+================================
+INTERNAL DEBUG-MODE REGISTER ABI
+================================
+
+This is the same as the kernel-mode register ABI for functions calls. The difference is that in
+debug-mode there's a different stack and a different exception frame. Almost all the global
+registers from kernel-mode (including the stack pointer) may be changed.
+
+	REGISTER	FLAVOUR	USE
+	===============	=======	====================================================
+	GR1			Debug stack pointer
+	GR16			GP-Rel base register for small data
+	GR31			Current debug exception frame pointer (__debug_frame)
+	SCR3		MMU	Saved value of GR31
+
+
+Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be
+exceedingly careful not to do any that would interact with the main kernel in this regard. Hence
+the debug mode code (gdbstub) is almost completely self-contained. The only external code used is
+the sprintf family of functions.
+
+Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an
+exception. That means unless manually disabled, single-stepping will blithely go on stepping into
+things like interrupts. See gdbstub.txt for more information.
+
+
+==========================
+VIRTUAL INTERRUPT HANDLING
+==========================
+
+Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once
+to read and once to write), we don't actually disable interrupts at all if we don't have to. What
+we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we
+then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume
+execution at the point the interrupt happened. Setting condition flags as a side effect of an
+arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the
+kernel - it does not affect userspace.
+
+The flags we use are:
+
+ (*) CCR.ICC2.Z [Zero flag]
+
+     Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be
+     modified by logical instructions without affecting the Carry flag.
+
+ (*) CCR.ICC2.C [Carry flag]
+
+     Clear to indicate hardware interrupts are really disabled, set otherwise.
+
+
+What happens is this:
+
+ (1) Normal kernel-mode operation.
+
+	ICC2.Z is 0, ICC2.C is 1.
+
+ (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs
+     doing. This is done simply with an unlikely BEQ instruction.
+
+ (3) The interrupts are disabled (local_irq_disable)
+
+	ICC2.Z is set to 1.
+
+ (4) If interrupts were then re-enabled (local_irq_enable):
+
+	ICC2.Z would be set to 0.
+
+     A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if
+     interrupts were now virtually enabled, but physically disabled - which they're not, so the
+     trap isn't taken. The kernel would then be back to state (1).
+
+ (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt
+     shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting
+     PSR.PIL to 14 and then it clears ICC2.C.
+
+ (6) If interrupts were then saved and disabled again (local_irq_save):
+
+	ICC2.Z would be shifted into the save variable and masked off (giving a 1).
+
+	ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0).
+
+ (7) If interrupts were then restored from state (6) (local_irq_restore):
+
+	ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which
+	gives a result of 0 - thus leaving ICC2.Z set.
+
+	ICC2.C would remain unaffected (ie: 0).
+
+     A TIHI #2 instruction would be used to again assay the current state, but this would do
+     nothing as Z==1.
+
+ (8) If interrupts were then enabled (local_irq_enable):
+
+	ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0.
+
+     A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0
+     [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true.
+
+ (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to
+     1 and return.
+
+(10) Immediately upon returning, the pending interrupt would be taken.
+
+(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is
+     clear, BEQ fails as per step (2)).
+
+(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely
+     enabled - or else the kernel wouldn't be here.
+
+(13) On return from the interrupt handler, things would be back to state (1).
+
+This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL.

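As a side note to the kernel-ABI.txt text above, the ICC2-based virtual interrupt disable/enable it describes could be sketched as GCC inline assembly roughly as follows. This is only an illustration derived from the description (a logical instruction sets ICC2.Z without touching ICC2.C, and TIHI #2 traps when Z==0 and C==0); it is not the code added by this commit, and the helper names are invented.

/* Illustrative sketch only - not the kernel's implementation. */
static inline void frv_virt_irq_disable(void)
{
	/* 0 AND 0 = 0: set ICC2.Z (virtually disabled), leave ICC2.C alone */
	asm volatile("	andcc	gr0,gr0,gr0,icc2" : : : "memory", "icc2");
}

static inline void frv_virt_irq_enable(void)
{
	/* 0 OR 1 = 1: clear ICC2.Z, then trap #2 if Z==0 && C==0, i.e. if
	 * interrupts are virtually enabled but still physically disabled,
	 * so the trap handler can re-enable them in hardware */
	asm volatile("	oricc	gr0,#1,gr0,icc2	\n"
		     "	tihi	icc2,gr0,#2	\n"
		     : : : "memory", "icc2");
}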
+ 4 - 0
Documentation/hwmon/w83627hf

@@ -36,6 +36,10 @@ Module Parameters
   (default is 1)
   Use 'init=0' to bypass initializing the chip.
   Try this if your computer crashes when you load the module.
+* reset: int
+  (default is 0)
+  The driver used to reset the chip on load, but does no more. Use
+  'reset=1' to restore the old behavior. Report if you need to do this.
 
 Description
 -----------

+ 26 - 0
Documentation/kernel-parameters.txt

@@ -335,6 +335,12 @@ running once the system is up.
 			timesource is not avalible, it defaults to PIT.
 			Format: { pit | tsc | cyclone | pmtmr }
 
+	disable_8254_timer
+	enable_8254_timer
+			[IA32/X86_64] Disable/Enable interrupt 0 timer routing
+			over the 8254 in addition to over the IO-APIC. The
+			kernel tries to set a sensible default.
+
 	hpet=		[IA-32,HPET] option to disable HPET and use PIT.
 			Format: disable
 
@@ -1034,6 +1040,8 @@ running once the system is up.
 
 	nomce		[IA-32] Machine Check Exception
 
+	nomca		[IA-64] Disable machine check abort handling
+
 	noresidual	[PPC] Don't use residual data on PReP machines.
 
 	noresume	[SWSUSP] Disables resume and restores original swap
@@ -1133,6 +1141,8 @@ running once the system is up.
 				Mechanism 1.
 		conf2		[IA-32] Force use of PCI Configuration
 				Mechanism 2.
+		nommconf	[IA-32,X86_64] Disable use of MMCONFIG for PCI
+				Configuration
 		nosort		[IA-32] Don't sort PCI devices according to
 				order given by the PCI BIOS. This sorting is
 				done to get a device order compatible with
@@ -1280,6 +1290,19 @@ running once the system is up.
 			New name for the ramdisk parameter.
 			See Documentation/ramdisk.txt.
 
+	rcu.blimit=	[KNL,BOOT] Set maximum number of finished
+			RCU callbacks to process in one batch.
+
+	rcu.qhimark=	[KNL,BOOT] Set threshold of queued
+			RCU callbacks over which batch limiting is disabled.
+
+	rcu.qlowmark=	[KNL,BOOT] Set threshold of queued
+			RCU callbacks below which batch limiting is re-enabled.
+
+	rcu.rsinterval=	[KNL,BOOT,SMP] Set the number of additional
+			RCU callbacks to queued before forcing reschedule
+			on all cpus.
+
 	rdinit=		[KNL]
 			Format: <full_path>
 			Run specified binary instead of /init from the ramdisk,
@@ -1636,6 +1659,9 @@ running once the system is up.
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
 
+	norandmaps	Don't use address space randomization
+			Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space
+
 
 
 ______________________________________________________________________
 Changelog:

+ 42 - 39
Documentation/kprobes.txt

@@ -136,17 +136,20 @@ Kprobes, jprobes, and return probes are implemented on the following
 architectures:
 
 - i386
-- x86_64 (AMD-64, E64MT)
+- x86_64 (AMD-64, EM64T)
 - ppc64
-- ia64 (Support for probes on certain instruction types is still in progress.)
+- ia64 (Does not support probes on instruction slot1.)
 - sparc64 (Return probes not yet implemented.)
 
 3. Configuring Kprobes
 
 When configuring the kernel using make menuconfig/xconfig/oldconfig,
-ensure that CONFIG_KPROBES is set to "y".  Under "Kernel hacking",
-look for "Kprobes".  You may have to enable "Kernel debugging"
-(CONFIG_DEBUG_KERNEL) before you can enable Kprobes.
+ensure that CONFIG_KPROBES is set to "y".  Under "Instrumentation
+Support", look for "Kprobes".
+
+So that you can load and unload Kprobes-based instrumentation modules,
+make sure "Loadable module support" (CONFIG_MODULES) and "Module
+unloading" (CONFIG_MODULE_UNLOAD) are set to "y".
 
 You may also want to ensure that CONFIG_KALLSYMS and perhaps even
 CONFIG_KALLSYMS_ALL are set to "y", since kallsyms_lookup_name()
@@ -262,18 +265,18 @@ at any time after the probe has been registered.
 
 5. Kprobes Features and Limitations
 
-As of Linux v2.6.12, Kprobes allows multiple probes at the same
-address.  Currently, however, there cannot be multiple jprobes on
-the same function at the same time.
+Kprobes allows multiple probes at the same address.  Currently,
+however, there cannot be multiple jprobes on the same function at
+the same time.
 
 In general, you can install a probe anywhere in the kernel.
 In particular, you can probe interrupt handlers.  Known exceptions
 are discussed in this section.
 
-For obvious reasons, it's a bad idea to install a probe in
-the code that implements Kprobes (mostly kernel/kprobes.c and
-arch/*/kernel/kprobes.c).  A patch in the v2.6.13 timeframe instructs
-Kprobes to reject such requests.
+The register_*probe functions will return -EINVAL if you attempt
+to install a probe in the code that implements Kprobes (mostly
+kernel/kprobes.c and arch/*/kernel/kprobes.c, but also functions such
+as do_page_fault and notifier_call_chain).
 
 If you install a probe in an inline-able function, Kprobes makes
 no attempt to chase down all inline instances of the function and
@@ -290,18 +293,14 @@ from the accidental ones.  Don't drink and probe.
 
 Kprobes makes no attempt to prevent probe handlers from stepping on
 each other -- e.g., probing printk() and then calling printk() from a
-probe handler.  As of Linux v2.6.12, if a probe handler hits a probe,
-that second probe's handlers won't be run in that instance.
-
-In Linux v2.6.12 and previous versions, Kprobes' data structures are
-protected by a single lock that is held during probe registration and
-unregistration and while handlers are run.  Thus, no two handlers
-can run simultaneously.  To improve scalability on SMP systems,
-this restriction will probably be removed soon, in which case
-multiple handlers (or multiple instances of the same handler) may
-run concurrently on different CPUs.  Code your handlers accordingly.
-
-Kprobes does not use semaphores or allocate memory except during
+probe handler.  If a probe handler hits a probe, that second probe's
+handlers won't be run in that instance, and the kprobe.nmissed member
+of the second probe will be incremented.
+
+As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of
+the same handler) may run concurrently on different CPUs.
+
+Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
 Probe handlers are run with preemption disabled.  Depending on the
@@ -316,11 +315,18 @@ address instead of the real return address for kretprobed functions.
 (As far as we can tell, __builtin_return_address() is used only
 for instrumentation and error reporting.)
 
-If the number of times a function is called does not match the
-number of times it returns, registering a return probe on that
-function may produce undesirable results.  We have the do_exit()
-and do_execve() cases covered.  do_fork() is not an issue.  We're
-unaware of other specific cases where this could be a problem.
+If the number of times a function is called does not match the number
+of times it returns, registering a return probe on that function may
+produce undesirable results.  We have the do_exit() case covered.
+do_execve() and do_fork() are not an issue.  We're unaware of other
+specific cases where this could be a problem.
+
+If, upon entry to or exit from a function, the CPU is running on
+a stack other than that of the current task, registering a return
+probe on that function may produce undesirable results.  For this
+reason, Kprobes doesn't support return probes (or kprobes or jprobes)
+on the x86_64 version of __switch_to(); the registration functions
+return -EINVAL.
 
 6. Probe Overhead
 
@@ -347,14 +353,12 @@ k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
 
 7. TODO
 
-a. SystemTap (http://sourceware.org/systemtap): Work in progress
-to provide a simplified programming interface for probe-based
-instrumentation.
-b. Improved SMP scalability: Currently, work is in progress to handle
-multiple kprobes in parallel.
-c. Kernel return probes for sparc64.
-d. Support for other architectures.
-e. User-space probes.
+a. SystemTap (http://sourceware.org/systemtap): Provides a simplified
+programming interface for probe-based instrumentation.  Try it out.
+b. Kernel return probes for sparc64.
+c. Support for other architectures.
+d. User-space probes.
+e. Watchpoint probes (which fire on data references).
 
 8. Kprobes Example
 
@@ -411,8 +415,7 @@ int init_module(void)
 		printk("Couldn't find %s to plant kprobe\n", "do_fork");
 		printk("Couldn't find %s to plant kprobe\n", "do_fork");
 		return -1;
 		return -1;
 	}
 	}
-	ret = register_kprobe(&kp);
-	if (ret < 0) {
+	if ((ret = register_kprobe(&kp) < 0)) {
 		printk("register_kprobe failed, returned %d\n", ret);
 		printk("register_kprobe failed, returned %d\n", ret);
 		return -1;
 		return -1;
 	}
 	}

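For readers following the Documentation/kprobes.txt hunks above, a complete minimal module in the spirit of the example that the last hunk patches might look like the sketch below. The probed symbol (do_fork) and the message strings mirror the fragment visible in the diff; the handler bodies and the rest of the skeleton are illustrative assumptions, not text from the patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static struct kprobe kp;

/* Called just before the probed instruction executes */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk("pre_handler: p->addr=0x%p\n", p->addr);
	return 0;
}

/* Called after the single-stepped instruction completes */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	printk("post_handler: p->addr=0x%p\n", p->addr);
}

int init_module(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	kp.post_handler = handler_post;
	kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!kp.addr) {
		printk("Couldn't find %s to plant kprobe\n", "do_fork");
		return -1;
	}
	ret = register_kprobe(&kp);
	if (ret < 0) {
		printk("register_kprobe failed, returned %d\n", ret);
		return -1;
	}
	printk("kprobe registered\n");
	return 0;
}

void cleanup_module(void)
{
	unregister_kprobe(&kp);
	printk("kprobe unregistered\n");
}
MODULE_LICENSE("GPL");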
+ 5 - 1
Documentation/mips/AU1xxx_IDE.README

@@ -95,11 +95,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y
 CONFIG_IDEDMA_PCI_AUTO=y
 CONFIG_BLK_DEV_IDE_AU1XXX=y
 CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y
-CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON=y
 CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128
 CONFIG_BLK_DEV_IDEDMA=y
 CONFIG_IDEDMA_AUTO=y
 
+Also define 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to enable
+the burst support on DBDMA controller.
+
 If the used system need the USB support enable the following kernel configs for
 high IDE to USB throughput.
 
@@ -115,6 +117,8 @@ CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128
 CONFIG_BLK_DEV_IDEDMA=y
 CONFIG_IDEDMA_AUTO=y
 
+Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to
+disable the burst support on DBDMA controller.
 
 ADD NEW HARD DISC TO WHITE OR BLACK LIST
 ----------------------------------------

+ 23 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,26 @@
+1 Release Date    : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.04
+3 Older Version   : 00.00.02.04 
+
+i.	Support for 1078 type (ppc IOP) controller, device id : 0x60 added.
+	During initialization, depending on the device id, the template members 
+	are initialized with function pointers specific to the ppc or 
+	xscale controllers.  
+
+		-Sumant Patro <Sumant.Patro@lsil.com>
+		
+1 Release Date    : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro 
+							<Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.04
+3 Older Version   : 00.00.02.02 
+i.	Register 16 byte CDB capability with scsi midlayer 
+
+	"Ths patch properly registers the 16 byte command length capability of the 
+	megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas 
+	hardware supports 16 byte CDB's."
+
+		-Joshua Giles <joshua_giles@dell.com> 
+
 1 Release Date    : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
 2 Current Version : 00.00.02.02
 3 Older Version   : 00.00.02.01 

+ 10 - 0
Documentation/sysctl/kernel.txt

@@ -16,6 +16,7 @@ before actually making adjustments.
 
 Currently, these files might (depending on your configuration)
 show up in /proc/sys/kernel:
+- acpi_video_flags
 - acct
 - core_pattern
 - core_uses_pid
@@ -57,6 +58,15 @@ show up in /proc/sys/kernel:
 
 ==============================================================
 
+acpi_video_flags:
+
+flags
+
+See Doc*/kernel/power/video.txt, it allows mode of video boot to be
+set during run time.
+
+==============================================================
+
 acct:
 
 highwater lowwater frequency

+ 2 - 2
Documentation/video4linux/CARDLIST.saa7134

@@ -13,7 +13,7 @@
  12 -> Medion 7134                              [16be:0003]
  13 -> Typhoon TV+Radio 90031
  14 -> ELSA EX-VISION 300TV                     [1048:226b]
- 15 -> ELSA EX-VISION 500TV                     [1048:226b]
+ 15 -> ELSA EX-VISION 500TV                     [1048:226a]
  16 -> ASUS TV-FM 7134                          [1043:4842,1043:4830,1043:4840]
  17 -> AOPEN VA1000 POWER                       [1131:7133]
  18 -> BMK MPEX No Tuner
@@ -75,7 +75,7 @@
  74 -> LifeView FlyTV Platinum Mini2            [14c0:1212]
  75 -> AVerMedia AVerTVHD MCE A180              [1461:1044]
  76 -> SKNet MonsterTV Mobile                   [1131:4ee9]
- 77 -> Pinnacle PCTV 110i (saa7133)             [11bd:002e]
+ 77 -> Pinnacle PCTV 40i/50i/110i (saa7133)     [11bd:002e]
  78 -> ASUSTeK P7131 Dual                       [1043:4862]
  79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
  80 -> ASUS Digimatrix TV                       [1043:0210]

+ 82 - 36
Documentation/vm/page_migration

@@ -12,12 +12,18 @@ is running.
 
 Page migration allows a process to manually relocate the node on which its
 pages are located through the MF_MOVE and MF_MOVE_ALL options while setting
-a new memory policy. The pages of process can also be relocated
+a new memory policy via mbind(). The pages of process can also be relocated
 from another process using the sys_migrate_pages() function call. The
 migrate_pages function call takes two sets of nodes and moves pages of a
 process that are located on the from nodes to the destination nodes.
-
-Manual migration is very useful if for example the scheduler has relocated
+Page migration functions are provided by the numactl package by Andi Kleen
+(a version later than 0.9.3 is required. Get it from
+ftp://ftp.suse.com/pub/people/ak). numactl provided libnuma which
+provides an interface similar to other numa functionality for page migration.
+cat /proc/<pid>/numa_maps allows an easy review of where the pages of
+a process are located. See also the numa_maps manpage in the numactl package.
+
+Manual migration is useful if for example the scheduler has relocated
 a process to a processor on a distant node. A batch scheduler or an
 administrator may detect the situation and move the pages of the process
 nearer to the new processor. At some point in the future we may have
@@ -25,10 +31,12 @@ some mechanism in the scheduler that will automatically move the pages.
 
 Larger installations usually partition the system using cpusets into
 sections of nodes. Paul Jackson has equipped cpusets with the ability to
-move pages when a task is moved to another cpuset. This allows automatic
-control over locality of a process. If a task is moved to a new cpuset
-then also all its pages are moved with it so that the performance of the
-process does not sink dramatically (as is the case today).
+move pages when a task is moved to another cpuset (See ../cpusets.txt).
+Cpusets allows the automation of process locality. If a task is moved to
+a new cpuset then also all its pages are moved with it so that the
+performance of the process does not sink dramatically. Also the pages
+of processes in a cpuset are moved if the allowed memory nodes of a
+cpuset are changed.
 
 Page migration allows the preservation of the relative location of pages
 within a group of nodes for all migration techniques which will preserve a
@@ -37,22 +45,26 @@ process. This is necessary in order to preserve the memory latencies.
 Processes will run with similar performance after migration.
 
 Page migration occurs in several steps. First a high level
-description for those trying to use migrate_pages() and then
-a low level description of how the low level details work.
+description for those trying to use migrate_pages() from the kernel
+(for userspace usage see the Andi Kleen's numactl package mentioned above)
+and then a low level description of how the low level details work.
 
-A. Use of migrate_pages()
--------------------------
+A. In kernel use of migrate_pages()
+-----------------------------------
 
 1. Remove pages from the LRU.
 
    Lists of pages to be migrated are generated by scanning over
    pages and moving them into lists. This is done by
-   calling isolate_lru_page() or __isolate_lru_page().
+   calling isolate_lru_page().
    Calling isolate_lru_page increases the references to the page
-   so that it cannot vanish under us.
+   so that it cannot vanish while the page migration occurs.
+   It also prevents the swapper or other scans to encounter
+   the page.
 
-2. Generate a list of newly allocates page to move the contents
-   of the first list to.
+2. Generate a list of newly allocates page. These pages will contain the
+   contents of the pages from the first list after page migration is
+   complete.
 
 3. The migrate_pages() function is called which attempts
    to do the migration. It returns the moved pages in the
@@ -63,13 +75,17 @@ A. Use of migrate_pages()
 4. The leftover pages of various types are returned
    to the LRU using putback_to_lru_pages() or otherwise
    disposed of. The pages will still have the refcount as
-   increased by isolate_lru_pages()!
+   increased by isolate_lru_pages() if putback_to_lru_pages() is not
+   used! The kernel may want to handle the various cases of failures in
+   different ways.
 
-B. Operation of migrate_pages()
---------------------------------
+B. How migrate_pages() works
+----------------------------
 
-migrate_pages does several passes over its list of pages. A page is moved
-if all references to a page are removable at the time.
+migrate_pages() does several passes over its list of pages. A page is moved
+if all references to a page are removable at the time. The page has
+already been removed from the LRU via isolate_lru_page() and the refcount
+is increased so that the page cannot be freed while page migration occurs.
 
 Steps:
 
@@ -79,36 +95,40 @@ Steps:
 
 3. Make sure that the page has assigned swap cache entry if
    it is an anonyous page. The swap cache reference is necessary
-   to preserve the information contain in the page table maps.
+   to preserve the information contain in the page table maps while
+   page migration occurs.
 
 4. Prep the new page that we want to move to. It is locked
    and set to not being uptodate so that all accesses to the new
-   page immediately lock while we are moving references.
+   page immediately lock while the move is in progress.
 
-5. All the page table references to the page are either dropped (file backed)
-   or converted to swap references (anonymous pages). This should decrease the
-   reference count.
+5. All the page table references to the page are either dropped (file
+   backed pages) or converted to swap references (anonymous pages).
+   This should decrease the reference count.
 
-6. The radix tree lock is taken
+6. The radix tree lock is taken. This will cause all processes trying
+   to reestablish a pte to block on the radix tree spinlock.
 
 7. The refcount of the page is examined and we back out if references remain
    otherwise we know that we are the only one referencing this page.
 
 8. The radix tree is checked and if it does not contain the pointer to this
-   page then we back out.
+   page then we back out because someone else modified the mapping first.
 
 9. The mapping is checked. If the mapping is gone then a truncate action may
    be in progress and we back out.
 
-10. The new page is prepped with some settings from the old page so that accesses
-   to the new page will be discovered to have the correct settings.
+10. The new page is prepped with some settings from the old page so that
+   accesses to the new page will be discovered to have the correct settings.
 
 11. The radix tree is changed to point to the new page.
 
-12. The reference count of the old page is dropped because the reference has now
-    been removed.
+12. The reference count of the old page is dropped because the radix tree
+    reference is gone.
 
-13. The radix tree lock is dropped.
+13. The radix tree lock is dropped. With that lookups become possible again
+    and other processes will move from spinning on the tree lock to sleeping on
+    the locked new page.
 
 14. The page contents are copied to the new page.
 
@@ -119,11 +139,37 @@ Steps:
 
 17. Queued up writeback on the new page is triggered.
 
-18. If swap pte's were generated for the page then remove them again.
+18. If swap pte's were generated for the page then replace them with real
+    ptes. This will reenable access for processes not blocked by the page lock.
+
+19. The page locks are dropped from the old and new page.
+    Processes waiting on the page lock can continue.
+
+20. The new page is moved to the LRU and can be scanned by the swapper
+    etc again.
+
+TODO list
+---------
+
+- Page migration requires the use of swap handles to preserve the
+  information of the anonymous page table entries. This means that swap
+  space is reserved but never used. The maximum number of swap handles used
+  is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration.
+  Reservation of pages could be avoided by having a special type of swap
+  handle that does not require swap space and that would only track the page
+  references. Something like that was proposed by Marcelo Tosatti in the
+  past (search for migration cache on lkml or linux-mm@kvack.org).
 
-19. The locks are dropped from the old and new page.
+- Page migration unmaps ptes for file backed pages and requires page
+  faults to reestablish these ptes. This could be optimized by somehow
+  recording the references before migration and then reestablish them later.
+  However, there are several locking challenges that have to be overcome
+  before this is possible.
 
-20. The new page is moved to the LRU.
+- Page migration generates read ptes for anonymous pages. Dirty page
+  faults are required to make the pages writable again. It may be possible
+  to generate a pte marked dirty if it is known that the page is dirty and
+  that this process has the only reference to that page.
 
-Christoph Lameter, December 19, 2005.
+Christoph Lameter, March 8, 2006.
 

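To connect the mbind()/MF_MOVE remark at the top of the page_migration text with something concrete, here is a hedged userspace sketch (not part of the patch). It assumes a NUMA kernel that has a node 1, the <numaif.h> mbind() wrapper shipped with libnuma (link with -lnuma), and that the MPOL_MF_MOVE flag is available; the buffer size and target node are arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <numaif.h>	/* mbind(), MPOL_BIND, MPOL_MF_MOVE */

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = 16 * pagesize;
	unsigned long nodemask = 1UL << 1;	/* target node 1 */
	void *buf;

	if (posix_memalign(&buf, pagesize, len))
		return 1;
	memset(buf, 0, len);	/* fault the pages in wherever they land first */

	/* Bind the range to node 1 and ask for already-allocated pages
	 * to be migrated there as well (MPOL_MF_MOVE). */
	if (mbind(buf, len, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_MOVE) != 0) {
		perror("mbind");
		return 1;
	}
	printf("range rebound to node 1; see /proc/self/numa_maps\n");
	return 0;
}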
+ 4 - 0
Documentation/x86_64/boot-options.txt

@@ -52,6 +52,10 @@ APICs
 		 apicmaintimer. Useful when your PIT timer is totally
 		 broken.
 
+   disable_8254_timer / enable_8254_timer
+		 Enable interrupt 0 timer routing over the 8254 in addition to over
+	         the IO-APIC. The kernel tries to set a sensible default.
+
 Early Console
 
    syntax: earlyprintk=vga

+ 19 - 4
MAINTAINERS

@@ -838,7 +838,6 @@ S:	Maintained
 
 DVB SUBSYSTEM AND DRIVERS
 P:	LinuxTV.org Project
-M:	mchehab@infradead.org
 M:	v4l-dvb-maintainer@linuxtv.org
 L: 	linux-dvb@linuxtv.org (subscription required)
 W:	http://linuxtv.org/
@@ -1632,8 +1631,8 @@ S:	Supported
 
 LINUX SECURITY MODULE (LSM) FRAMEWORK
 P:	Chris Wright
-M:	chrisw@osdl.org
-L:	linux-security-module@wirex.com
+M:	chrisw@sous-sol.org
+L:	linux-security-module@vger.kernel.org
 W:	http://lsm.immunix.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
 S:	Supported
@@ -2232,7 +2231,23 @@ P:	Martin Schwidefsky
 M:	schwidefsky@de.ibm.com
 M:	linux390@de.ibm.com
 L:	linux-390@vm.marist.edu
-W:	http://oss.software.ibm.com/developerworks/opensource/linux390
+W:	http://www.ibm.com/developerworks/linux/linux390/
+S:	Supported
+
+S390 NETWORK DRIVERS
+P:	Frank Pavlic
+M:	fpavlic@de.ibm.com
+M:	linux390@de.ibm.com
+L:	linux-390@vm.marist.edu
+W:	http://www.ibm.com/developerworks/linux/linux390/
+S:	Supported
+
+S390 ZFCP DRIVER
+P:	Andreas Herrmann
+M:	aherrman@de.ibm.com
+M:	linux390@de.ibm.com
+L:	linux-390@vm.marist.edu
+W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
 SAA7146 VIDEO4LINUX-2 DRIVER

+ 5 - 6
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 16
-EXTRAVERSION =-rc2
+EXTRAVERSION =-rc6
 NAME=Sliding Snow Leopard
 
 # *DOCUMENTATION*
@@ -106,13 +106,12 @@ KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd)
 $(if $(KBUILD_OUTPUT),, \
      $(error output directory "$(saved-output)" does not exist))
 
-.PHONY: $(MAKECMDGOALS) cdbuilddir
-$(MAKECMDGOALS) _all: cdbuilddir
+.PHONY: $(MAKECMDGOALS)
 
-cdbuilddir:
+$(filter-out _all,$(MAKECMDGOALS)) _all:
 	$(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
 	KBUILD_SRC=$(CURDIR) \
-	KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $(MAKECMDGOALS)
+	KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@
 
 # Leave processing to above invocation of make
 skip-makefile := 1
@@ -906,7 +905,7 @@ define filechk_version.h
 	)
 endef
 
-include/linux/version.h: $(srctree)/Makefile .config FORCE
+include/linux/version.h: $(srctree)/Makefile .config .kernelrelease FORCE
 	$(call filechk,version.h)
 
 # ---------------------------------------------------------------------------

+ 6 - 1
arch/alpha/kernel/irq.c

@@ -151,8 +151,13 @@ handle_irq(int irq, struct pt_regs * regs)
 	}
 
 	irq_enter();
+	/*
+	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
+	 * explicitly enable interrupts afterwards - some MILO PALcode
+	 * (namely LX164 one) seems to have severe problems with RTI
+	 * at IPL 0.
+	 */
 	local_irq_disable();
 	__do_IRQ(irq, regs);
-	local_irq_enable();
 	irq_exit();
 }

+ 3 - 1
arch/arm/Kconfig

@@ -78,7 +78,7 @@ menu "System Type"
 
 choice
 	prompt "ARM system type"
-	default ARCH_RPC
+	default ARCH_VERSATILE
 
 config ARCH_CLPS7500
 	bool "Cirrus-CL-PS7500FE"
@@ -799,6 +799,8 @@ source "drivers/i2c/Kconfig"
 
 source "drivers/spi/Kconfig"
 
+source "drivers/w1/Kconfig"
+
 source "drivers/hwmon/Kconfig"
 
 #source "drivers/l3/Kconfig"

+ 21 - 0
arch/arm/common/locomo.c

@@ -629,6 +629,22 @@ static int locomo_resume(struct platform_device *dev)
 }
 #endif
 
+
+#define LCM_ALC_EN	0x8000
+
+void frontlight_set(struct locomo *lchip, int duty, int vr, int bpwf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lchip->lock, flags);
+	locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+	udelay(100);
+	locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
+	locomo_writel(bpwf | LCM_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+	spin_unlock_irqrestore(&lchip->lock, flags);
+}
+
+
 /**
  *	locomo_probe - probe for a single LoCoMo chip.
  *	@phys_addr: physical address of device.
@@ -688,6 +704,11 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
 	/* FrontLight */
 	locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
 	locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
+
+	/* Same constants can be used for collie and poodle
+	   (depending on CONFIG options in original sharp code)? */
+	frontlight_set(lchip, 163, 0, 148);
+
 	/* Longtime timer */
 	locomo_writel(0, lchip->base + LOCOMO_LTINT);
 	/* SPI */

+ 12 - 4
arch/arm/common/rtctime.c

@@ -128,19 +128,27 @@ EXPORT_SYMBOL(rtc_tm_to_time);
 /*
  * Calculate the next alarm time given the requested alarm time mask
  * and the current time.
- *
- * FIXME: for now, we just copy the alarm time because we're lazy (and
- * is therefore buggy - setting a 10am alarm at 8pm will not result in
- * the alarm triggering.)
  */
 void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
 {
+	unsigned long next_time;
+	unsigned long now_time;
+
 	next->tm_year = now->tm_year;
 	next->tm_mon = now->tm_mon;
 	next->tm_mday = now->tm_mday;
 	next->tm_hour = alrm->tm_hour;
 	next->tm_min = alrm->tm_min;
 	next->tm_sec = alrm->tm_sec;
+
+	rtc_tm_to_time(now, &now_time);
+	rtc_tm_to_time(next, &next_time);
+
+	if (next_time < now_time) {
+		/* Advance one day */
+		next_time += 60 * 60 * 24;
+		rtc_time_to_tm(next_time, next);
+	}
 }
 
 static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)

+ 3 - 1
arch/arm/kernel/asm-offsets.c

@@ -57,7 +57,9 @@ int main(void)
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
-  DEFINE(TI_IWMMXT_STATE,	(offsetof(struct thread_info, fpstate)+4)&~7);
+#ifdef CONFIG_IWMMXT
+  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
+#endif
   BLANK();
   DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
   DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));

+ 1 - 1
arch/arm/kernel/calls.S

@@ -111,7 +111,7 @@
 		CALL(sys_statfs)
 /* 100 */	CALL(sys_fstatfs)
 		CALL(sys_ni_syscall)
-		CALL(OBSOLETE(sys_socketcall))
+		CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
 		CALL(sys_syslog)
 		CALL(sys_setitimer)
 /* 105 */	CALL(sys_getitimer)

+ 2 - 0
arch/arm/kernel/compat.c

@@ -27,6 +27,8 @@
 
 #include <asm/mach/arch.h>
 
+#include "compat.h"
+
 /*
  * Usage:
  *  - do not go blindly adding fields, add them at the end

+ 13 - 0
arch/arm/kernel/compat.h

@@ -0,0 +1,13 @@
+/*
+ *  linux/arch/arm/kernel/compat.h
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+extern void convert_to_tag_list(struct tag *tags);
+
+extern void squash_mem_tags(struct tag *tag);

+ 1 - 1
arch/arm/kernel/entry-armv.S

@@ -566,7 +566,7 @@ ENTRY(__switch_to)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]!
 #endif
 #if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_MPCORE
+#ifdef CONFIG_CPU_32v6K
 	clrex
 #else
 	strex	r5, r4, [ip]			@ Clear exclusive monitor

+ 2 - 1
arch/arm/kernel/process.c

@@ -27,6 +27,7 @@
 #include <linux/kallsyms.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
+#include <linux/elfcore.h>
 
 #include <asm/leds.h>
 #include <asm/processor.h>
@@ -83,7 +84,7 @@ EXPORT_SYMBOL(pm_power_off);
  * This is our default idle handler.  We need to disable
  * interrupts here to ensure we don't miss a wakeup call.
  */
-void default_idle(void)
+static void default_idle(void)
 {
 	if (hlt_counter)
 		cpu_relax();

+ 4 - 10
arch/arm/kernel/ptrace.c

@@ -610,15 +610,12 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
 {
 	struct thread_info *thread = task_thread_info(tsk);
-	void *ptr = &thread->fpstate;
 
 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
 		return -ENODATA;
 	iwmmxt_task_disable(thread);  /* force it to ram */
-	/* The iWMMXt state is stored doubleword-aligned.  */
-	if (((long) ptr) & 4)
-		ptr += 4;
-	return copy_to_user(ufp, ptr, 0x98) ? -EFAULT : 0;
+	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
+		? -EFAULT : 0;
 }
 
 /*
@@ -627,15 +624,12 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
 {
 	struct thread_info *thread = task_thread_info(tsk);
-	void *ptr = &thread->fpstate;
 
 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
 		return -EACCES;
 	iwmmxt_task_release(thread);  /* force a reload */
-	/* The iWMMXt state is stored doubleword-aligned.  */
-	if (((long) ptr) & 4)
-		ptr += 4;
-	return copy_from_user(ptr, ufp, 0x98) ? -EFAULT : 0;
+	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
+		? -EFAULT : 0;
 }
 
 #endif

+ 7 - 3
arch/arm/kernel/setup.c

@@ -23,6 +23,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/smp.h>
 
 #include <asm/cpu.h>
 #include <asm/elf.h>
@@ -36,6 +37,8 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
+#include "compat.h"
+
 #ifndef MEM_SIZE
 #define MEM_SIZE	(16*1024*1024)
 #endif
@@ -52,10 +55,7 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif
 
-extern unsigned int mem_fclk_21285;
 extern void paging_init(struct meminfo *, struct machine_desc *desc);
-extern void convert_to_tag_list(struct tag *tags);
-extern void squash_mem_tags(struct tag *tag);
 extern void reboot_setup(char *str);
 extern int root_mountflags;
 extern void _stext, _text, _etext, __data_start, _edata, _end;
@@ -771,6 +771,10 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
+#ifdef CONFIG_SMP
+	smp_init_cpus();
+#endif
+
 	cpu_init();
 
 	/*

+ 0 - 1
arch/arm/kernel/smp.c

@@ -338,7 +338,6 @@ void __init smp_prepare_boot_cpu(void)
 
 	per_cpu(cpu_data, cpu).idle = current;
 
-	cpu_set(cpu, cpu_possible_map);
 	cpu_set(cpu, cpu_present_map);
 	cpu_set(cpu, cpu_online_map);
 }

+ 30 - 0
arch/arm/kernel/sys_oabi-compat.c

@@ -64,6 +64,7 @@
  * sys_connect:
  * sys_sendmsg:
  * sys_sendto:
+ * sys_socketcall:
  *
  *   struct sockaddr_un loses its padding with EABI.  Since the size of the
  *   structure is used as a validation test in unix_mkname(), we need to
@@ -78,6 +79,7 @@
 #include <linux/eventpoll.h>
 #include <linux/sem.h>
 #include <linux/socket.h>
+#include <linux/net.h>
 #include <asm/ipc.h>
 #include <asm/uaccess.h>
 
@@ -408,3 +410,31 @@ asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned fla
 	return sys_sendmsg(fd, msg, flags);
 }
 
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
+{
+	unsigned long r = -EFAULT, a[6];
+
+	switch (call) {
+	case SYS_BIND:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]);
+		break;
+	case SYS_CONNECT:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]);
+		break;
+	case SYS_SENDTO:
+		if (copy_from_user(a, args, 6 * sizeof(long)) == 0)
+			r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3],
+					    (struct sockaddr __user *)a[4], a[5]);
+		break;
+	case SYS_SENDMSG:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
+		break;
+	default:
+		r = sys_socketcall(call, args);
+	}
+
+	return r;
+}

+ 6 - 4
arch/arm/kernel/time.c

@@ -422,12 +422,14 @@ static int timer_dyn_tick_disable(void)
 void timer_dyn_reprogram(void)
 {
 	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+	unsigned long next, seq;
 
-	if (dyn_tick) {
-		write_seqlock(&xtime_lock);
-		if (dyn_tick->state & DYN_TICK_ENABLED)
+	if (dyn_tick && (dyn_tick->state & DYN_TICK_ENABLED)) {
+		next = next_timer_interrupt();
+		do {
+			seq = read_seqbegin(&xtime_lock);
 			dyn_tick->reprogram(next_timer_interrupt() - jiffies);
-		write_sequnlock(&xtime_lock);
+		} while (read_seqretry(&xtime_lock, seq));
 	}
 }
 

+ 8 - 0
arch/arm/kernel/traps.c

@@ -19,6 +19,7 @@
 #include <linux/personality.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 
 #include <asm/atomic.h>
@@ -231,6 +232,13 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 	__die(str, err, thread, regs);
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
+
+	if (panic_on_oops) {
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		ssleep(5);
+		panic("Fatal exception");
+	}
+
 	do_exit(SIGSEGV);
 }
 

+ 2 - 2
arch/arm/lib/muldi3.S

@@ -29,8 +29,8 @@ ENTRY(__aeabi_lmul)
 
 	mul	xh, yl, xh
 	mla	xh, xl, yh, xh
-	mov	ip, xl, asr #16
-	mov	yh, yl, asr #16
+	mov	ip, xl, lsr #16
+	mov	yh, yl, lsr #16
 	bic	xl, xl, ip, lsl #16
 	bic	yl, yl, yh, lsl #16
 	mla	xh, yh, ip, xh

+ 3 - 1
arch/arm/mach-at91rm9200/devices.c

@@ -100,8 +100,10 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
 		at91_set_gpio_input(data->vbus_pin, 0);
 		at91_set_deglitch(data->vbus_pin, 1);
 	}
-	if (data->pullup_pin)
+	if (data->pullup_pin) {
 		at91_set_gpio_output(data->pullup_pin, 0);
+		at91_set_multi_drive(data->pullup_pin, 1);
+	}
 
 	udc_data = *data;
 	platform_device_register(&at91rm9200_udc_device);

+ 29 - 2
arch/arm/mach-at91rm9200/gpio.c

@@ -159,6 +159,23 @@ int __init_or_module at91_set_deglitch(unsigned pin, int is_on)
 }
 }
 EXPORT_SYMBOL(at91_set_deglitch);
 EXPORT_SYMBOL(at91_set_deglitch);
 
 
+/*
+ * enable/disable the multi-driver; This is only valid for output and
+ * allows the output pin to run as an open collector output.
+ */
+int __init_or_module at91_set_multi_drive(unsigned pin, int is_on)
+{
+	void __iomem	*pio = pin_to_controller(pin);
+	unsigned	mask = pin_to_mask(pin);
+
+	if (!pio)
+		return -EINVAL;
+
+	__raw_writel(mask, pio + (is_on ? PIO_MDER : PIO_MDDR));
+	return 0;
+}
+EXPORT_SYMBOL(at91_set_multi_drive);
+
 /*--------------------------------------------------------------------------*/
 /*--------------------------------------------------------------------------*/
 
 
 
 
@@ -257,8 +274,18 @@ static void gpio_irq_handler(unsigned irq, struct irqdesc *desc, struct pt_regs
 		gpio = &irq_desc[pin];
 		gpio = &irq_desc[pin];
 
 
 		while (isr) {
 		while (isr) {
-			if (isr & 1)
-				gpio->handle(pin, gpio, regs);
+			if (isr & 1) {
+				if (unlikely(gpio->disable_depth)) {
+					/*
+					 * The core ARM interrupt handler lazily disables IRQs so
+					 * another IRQ must be generated before it actually gets
+					 * here to be disabled on the GPIO controller.
+					 */
+					gpio_irq_mask(pin);
+				}
+				else
+					gpio->handle(pin, gpio, regs);
+			}
 			pin++;
 			pin++;
 			gpio++;
 			gpio++;
 			isr >>= 1;
 			isr >>= 1;

+ 15 - 6
arch/arm/mach-integrator/platsmp.c

@@ -140,6 +140,18 @@ static void __init poke_milo(void)
 	mb();
 	mb();
 }
 }
 
 
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i, ncores = get_core_count();
+
+	for (i = 0; i < ncores; i++)
+		cpu_set(i, cpu_possible_map);
+}
+
 void __init smp_prepare_cpus(unsigned int max_cpus)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 {
 	unsigned int ncores = get_core_count();
 	unsigned int ncores = get_core_count();
@@ -176,14 +188,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = ncores;
 		max_cpus = ncores;
 
 
 	/*
 	/*
-	 * Initialise the possible/present maps.
-	 * cpu_possible_map describes the set of CPUs which may be present
-	 * cpu_present_map describes the set of CPUs populated
+	 * Initialise the present map, which describes the set of CPUs
+	 * actually populated at the present time.
 	 */
 	 */
-	for (i = 0; i < max_cpus; i++) {
-		cpu_set(i, cpu_possible_map);
+	for (i = 0; i < max_cpus; i++)
 		cpu_set(i, cpu_present_map);
 		cpu_set(i, cpu_present_map);
-	}
 
 
 	/*
 	/*
 	 * Do we need any more CPUs? If so, then let them know where
 	 * Do we need any more CPUs? If so, then let them know where

+ 0 - 1
arch/arm/mach-iop3xx/iop321-setup.c

@@ -13,7 +13,6 @@
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/config.h>
 #include <linux/config.h>
-#include <linux/init.h>
 #include <linux/major.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/fs.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>

+ 0 - 1
arch/arm/mach-iop3xx/iop331-setup.c

@@ -12,7 +12,6 @@
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/config.h>
 #include <linux/config.h>
-#include <linux/init.h>
 #include <linux/major.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/fs.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>

+ 1 - 3
arch/arm/mach-ixp4xx/Kconfig

@@ -8,11 +8,9 @@ menu "Intel IXP4xx Implementation Options"
 
 
 comment "IXP4xx Platforms"
 comment "IXP4xx Platforms"
 
 
-# This entry is placed on top because otherwise it would have
-# been shown as a submenu.
 config MACH_NSLU2
 config MACH_NSLU2
 	bool
 	bool
-	prompt "NSLU2" if !(MACH_IXDP465 || MACH_IXDPG425 || ARCH_IXDP425 || ARCH_ADI_COYOTE || ARCH_AVILA || ARCH_IXCDP1100 || ARCH_PRPMC1100 || MACH_GTWX5715)
+	prompt "Linksys NSLU2"
 	help
 	help
 	  Say 'Y' here if you want your kernel to support Linksys's
 	  Say 'Y' here if you want your kernel to support Linksys's
 	  NSLU2 NAS device. For more information on this platform,
 	  NSLU2 NAS device. For more information on this platform,

+ 13 - 7
arch/arm/mach-ixp4xx/common.c

@@ -111,24 +111,30 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
 	if (line < 0)
 	if (line < 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	if (type & IRQT_BOTHEDGE) {
+	switch (type){
+	case IRQT_BOTHEDGE:
 		int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
 		int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
 		irq_type = IXP4XX_IRQ_EDGE;
 		irq_type = IXP4XX_IRQ_EDGE;
-	} else  if (type & IRQT_RISING) {
+		break;
+	case IRQT_RISING:
 		int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
 		int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
 		irq_type = IXP4XX_IRQ_EDGE;
 		irq_type = IXP4XX_IRQ_EDGE;
-	} else if (type & IRQT_FALLING) {
+		break;
+	case IRQT_FALLING:
 		int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
 		int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
 		irq_type = IXP4XX_IRQ_EDGE;
 		irq_type = IXP4XX_IRQ_EDGE;
-	} else if (type & IRQT_HIGH) {
+		break;
+	case IRQT_HIGH:
 		int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
 		int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
 		irq_type = IXP4XX_IRQ_LEVEL;
 		irq_type = IXP4XX_IRQ_LEVEL;
-	} else if (type & IRQT_LOW) {
+		break;
+	case IRQT_LOW:
 		int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
 		int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
 		irq_type = IXP4XX_IRQ_LEVEL;
 		irq_type = IXP4XX_IRQ_LEVEL;
-	} else
+		break;
+	default:
 		return -EINVAL;
 		return -EINVAL;
-
+	}
 	ixp4xx_config_irq(irq, irq_type);
 	ixp4xx_config_irq(irq, irq_type);
 
 
 	if (line >= 8) {	/* pins 8-15 */
 	if (line >= 8) {	/* pins 8-15 */

+ 3 - 0
arch/arm/mach-ixp4xx/nas100d-power.c

@@ -56,6 +56,9 @@ static int __init nas100d_power_init(void)
 
 
 static void __exit nas100d_power_exit(void)
 static void __exit nas100d_power_exit(void)
 {
 {
+	if (!(machine_is_nas100d()))
+		return;
+
 	free_irq(NAS100D_RB_IRQ, NULL);
 	free_irq(NAS100D_RB_IRQ, NULL);
 }
 }
 
 

+ 3 - 0
arch/arm/mach-ixp4xx/nas100d-setup.c

@@ -113,6 +113,9 @@ static void __init nas100d_init(void)
 {
 {
 	ixp4xx_sys_init();
 	ixp4xx_sys_init();
 
 
+	/* gpio 14 and 15 are _not_ clocks */
+	*IXP4XX_GPIO_GPCLKR = 0;
+
 	nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
 	nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
 	nas100d_flash_resource.end =
 	nas100d_flash_resource.end =
 		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
 		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;

+ 3 - 0
arch/arm/mach-ixp4xx/nslu2-power.c

@@ -77,6 +77,9 @@ static int __init nslu2_power_init(void)
 
 
 static void __exit nslu2_power_exit(void)
 static void __exit nslu2_power_exit(void)
 {
 {
+	if (!(machine_is_nslu2()))
+		return;
+
 	free_irq(NSLU2_RB_IRQ, NULL);
 	free_irq(NSLU2_RB_IRQ, NULL);
 	free_irq(NSLU2_PB_IRQ, NULL);
 	free_irq(NSLU2_PB_IRQ, NULL);
 }
 }

+ 11 - 2
arch/arm/mach-ixp4xx/nslu2-setup.c

@@ -27,8 +27,6 @@ static struct flash_platform_data nslu2_flash_data = {
 };
 };
 
 
 static struct resource nslu2_flash_resource = {
 static struct resource nslu2_flash_resource = {
-	.start			= NSLU2_FLASH_BASE,
-	.end			= NSLU2_FLASH_BASE + NSLU2_FLASH_SIZE,
 	.flags			= IORESOURCE_MEM,
 	.flags			= IORESOURCE_MEM,
 };
 };
 
 
@@ -52,6 +50,12 @@ static struct platform_device nslu2_i2c_controller = {
 	.num_resources		= 0,
 	.num_resources		= 0,
 };
 };
 
 
+static struct platform_device nslu2_beeper = {
+	.name			= "ixp4xx-beeper",
+	.id			= NSLU2_GPIO_BUZZ,
+	.num_resources		= 0,
+};
+
 static struct resource nslu2_uart_resources[] = {
 static struct resource nslu2_uart_resources[] = {
 	{
 	{
 		.start		= IXP4XX_UART1_BASE_PHYS,
 		.start		= IXP4XX_UART1_BASE_PHYS,
@@ -99,6 +103,7 @@ static struct platform_device *nslu2_devices[] __initdata = {
 	&nslu2_i2c_controller,
 	&nslu2_i2c_controller,
 	&nslu2_flash,
 	&nslu2_flash,
 	&nslu2_uart,
 	&nslu2_uart,
+	&nslu2_beeper,
 };
 };
 
 
 static void nslu2_power_off(void)
 static void nslu2_power_off(void)
@@ -116,6 +121,10 @@ static void __init nslu2_init(void)
 {
 {
 	ixp4xx_sys_init();
 	ixp4xx_sys_init();
 
 
+	nslu2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
+	nslu2_flash_resource.end =
+		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+
 	pm_power_off = nslu2_power_off;
 	pm_power_off = nslu2_power_off;
 
 
 	platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));
 	platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));

+ 15 - 6
arch/arm/mach-realview/platsmp.c

@@ -143,6 +143,18 @@ static void __init poke_milo(void)
 	mb();
 	mb();
 }
 }
 
 
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i, ncores = get_core_count();
+
+	for (i = 0; i < ncores; i++)
+		cpu_set(i, cpu_possible_map);
+}
+
 void __init smp_prepare_cpus(unsigned int max_cpus)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 {
 	unsigned int ncores = get_core_count();
 	unsigned int ncores = get_core_count();
@@ -179,14 +191,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	local_timer_setup(cpu);
 	local_timer_setup(cpu);
 
 
 	/*
 	/*
-	 * Initialise the possible/present maps.
-	 * cpu_possible_map describes the set of CPUs which may be present
-	 * cpu_present_map describes the set of CPUs populated
+	 * Initialise the present map, which describes the set of CPUs
+	 * actually populated at the present time.
 	 */
 	 */
-	for (i = 0; i < max_cpus; i++) {
-		cpu_set(i, cpu_possible_map);
+	for (i = 0; i < max_cpus; i++)
 		cpu_set(i, cpu_present_map);
 		cpu_set(i, cpu_present_map);
-	}
 
 
 	/*
 	/*
 	 * Do we need any more CPUs? If so, then let them know where
 	 * Do we need any more CPUs? If so, then let them know where

+ 12 - 0
arch/arm/mach-s3c2410/devs.c

@@ -334,11 +334,17 @@ static struct resource s3c_spi0_resource[] = {
 
 
 };
 };
 
 
+static u64 s3c_device_spi0_dmamask = 0xffffffffUL;
+
 struct platform_device s3c_device_spi0 = {
 struct platform_device s3c_device_spi0 = {
 	.name		  = "s3c2410-spi",
 	.name		  = "s3c2410-spi",
 	.id		  = 0,
 	.id		  = 0,
 	.num_resources	  = ARRAY_SIZE(s3c_spi0_resource),
 	.num_resources	  = ARRAY_SIZE(s3c_spi0_resource),
 	.resource	  = s3c_spi0_resource,
 	.resource	  = s3c_spi0_resource,
+        .dev              = {
+                .dma_mask = &s3c_device_spi0_dmamask,
+                .coherent_dma_mask = 0xffffffffUL
+        }
 };
 };
 
 
 EXPORT_SYMBOL(s3c_device_spi0);
 EXPORT_SYMBOL(s3c_device_spi0);
@@ -359,11 +365,17 @@ static struct resource s3c_spi1_resource[] = {
 
 
 };
 };
 
 
+static u64 s3c_device_spi1_dmamask = 0xffffffffUL;
+
 struct platform_device s3c_device_spi1 = {
 struct platform_device s3c_device_spi1 = {
 	.name		  = "s3c2410-spi",
 	.name		  = "s3c2410-spi",
 	.id		  = 1,
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s3c_spi1_resource),
 	.num_resources	  = ARRAY_SIZE(s3c_spi1_resource),
 	.resource	  = s3c_spi1_resource,
 	.resource	  = s3c_spi1_resource,
+        .dev              = {
+                .dma_mask = &s3c_device_spi1_dmamask,
+                .coherent_dma_mask = 0xffffffffUL
+        }
 };
 };
 
 
 EXPORT_SYMBOL(s3c_device_spi1);
 EXPORT_SYMBOL(s3c_device_spi1);

+ 27 - 2
arch/arm/mach-s3c2410/mach-h1940.c

@@ -46,10 +46,11 @@
 #include <asm/irq.h>
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 #include <asm/mach-types.h>
 
 
-//#include <asm/debug-ll.h>
+
 #include <asm/arch/regs-serial.h>
 #include <asm/arch/regs-serial.h>
 #include <asm/arch/regs-lcd.h>
 #include <asm/arch/regs-lcd.h>
 
 
+#include <asm/arch/h1940-latch.h>
 #include <asm/arch/fb.h>
 #include <asm/arch/fb.h>
 
 
 #include <linux/serial_core.h>
 #include <linux/serial_core.h>
@@ -59,7 +60,12 @@
 #include "cpu.h"
 #include "cpu.h"
 
 
 static struct map_desc h1940_iodesc[] __initdata = {
 static struct map_desc h1940_iodesc[] __initdata = {
-	/* nothing here yet */
+	[0] = {
+		.virtual	= (unsigned long)H1940_LATCH,
+		.pfn		= __phys_to_pfn(H1940_PA_LATCH),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE
+	},
 };
 };
 
 
 #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
 #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
@@ -92,6 +98,25 @@ static struct s3c2410_uartcfg h1940_uartcfgs[] = {
 	}
 	}
 };
 };
 
 
+/* Board control latch control */
+
+static unsigned int latch_state = H1940_LATCH_DEFAULT;
+
+void h1940_latch_control(unsigned int clear, unsigned int set)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	latch_state &= ~clear;
+	latch_state |= set;
+
+	__raw_writel(latch_state, H1940_LATCH);
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(h1940_latch_control);
 
 
 
 
 /**
 /**

+ 31 - 0
arch/arm/mach-s3c2410/s3c2400.h

@@ -0,0 +1,31 @@
+/* arch/arm/mach-s3c2410/s3c2400.h
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for S3C2400 cpu support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Modifications:
+ *     09-Fev-2006 LCVR  First version, based on s3c2410.h
+*/
+
+#ifdef CONFIG_CPU_S3C2400
+
+extern  int s3c2400_init(void);
+
+extern void s3c2400_map_io(struct map_desc *mach_desc, int size);
+
+extern void s3c2400_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+extern void s3c2400_init_clocks(int xtal);
+
+#else
+#define s3c2400_init_clocks NULL
+#define s3c2400_init_uarts NULL
+#define s3c2400_map_io NULL
+#define s3c2400_init NULL
+#endif

+ 48 - 45
arch/arm/mach-versatile/pci.c

@@ -240,6 +240,14 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
         int i;
         int i;
         int myslot = -1;
         int myslot = -1;
 	unsigned long val;
 	unsigned long val;
+	void __iomem *local_pci_cfg_base;
+
+	val = __raw_readl(SYS_PCICTL);
+	if (!(val & 1)) {
+		printk("Not plugged into PCI backplane!\n");
+		ret = -EIO;
+		goto out;
+	}
 
 
 	if (nr == 0) {
 	if (nr == 0) {
 		sys->mem_offset = 0;
 		sys->mem_offset = 0;
@@ -253,48 +261,45 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
 		goto out;
 		goto out;
 	}
 	}
 
 
-	__raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28,PCI_IMAP0);
-	__raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28,PCI_IMAP1);
-	__raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28,PCI_IMAP2);
-
-	__raw_writel(1, SYS_PCICTL);
-
-	val = __raw_readl(SYS_PCICTL);
-	if (!(val & 1)) {
-		printk("Not plugged into PCI backplane!\n");
-		ret = -EIO;
-		goto out;
-	}
-
 	/*
 	/*
 	 *  We need to discover the PCI core first to configure itself
 	 *  We need to discover the PCI core first to configure itself
 	 *  before the main PCI probing is performed
 	 *  before the main PCI probing is performed
 	 */
 	 */
-	for (i=0; i<32; i++) {
+	for (i=0; i<32; i++)
 		if ((__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+DEVICE_ID_OFFSET) == VP_PCI_DEVICE_ID) &&
 		if ((__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+DEVICE_ID_OFFSET) == VP_PCI_DEVICE_ID) &&
 		    (__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+CLASS_ID_OFFSET) == VP_PCI_CLASS_ID)) {
 		    (__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+CLASS_ID_OFFSET) == VP_PCI_CLASS_ID)) {
 			myslot = i;
 			myslot = i;
-
-			__raw_writel(myslot, PCI_SELFID);
-			val = __raw_readl(VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
-			val |= (1<<2);
-			__raw_writel(val, VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
 			break;
 			break;
 		}
 		}
-	}
 
 
 	if (myslot == -1) {
 	if (myslot == -1) {
 		printk("Cannot find PCI core!\n");
 		printk("Cannot find PCI core!\n");
 		ret = -EIO;
 		ret = -EIO;
-	} else {
-		printk("PCI core found (slot %d)\n",myslot);
-		/* Do not to map Versatile FPGA PCI device
-		   into memory space as we are short of
-		   mappable memory */
-		pci_slot_ignore |= (1 << myslot);
-		ret = 1;
+		goto out;
 	}
 	}
 
 
+	printk("PCI core found (slot %d)\n",myslot);
+
+	__raw_writel(myslot, PCI_SELFID);
+	local_pci_cfg_base = (void *) VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11);
+
+	val = __raw_readl(local_pci_cfg_base + CSR_OFFSET);
+	val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+	__raw_writel(val, local_pci_cfg_base + CSR_OFFSET);
+
+	/*
+	 * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+	 */
+	__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+	__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+	__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+	/*
+	 * Do not to map Versatile FPGA PCI device into memory space
+	 */
+	pci_slot_ignore |= (1 << myslot);
+	ret = 1;
+
  out:
  out:
 	return ret;
 	return ret;
 }
 }
@@ -305,18 +310,18 @@ struct pci_bus *pci_versatile_scan_bus(int nr, struct pci_sys_data *sys)
 	return pci_scan_bus(sys->busnr, &pci_versatile_ops, sys);
 	return pci_scan_bus(sys->busnr, &pci_versatile_ops, sys);
 }
 }
 
 
-/*
- * V3_LB_BASE? - local bus address
- * V3_LB_MAP?  - pci bus address
- */
 void __init pci_versatile_preinit(void)
 void __init pci_versatile_preinit(void)
 {
 {
-}
+	__raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28, PCI_IMAP0);
+	__raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28, PCI_IMAP1);
+	__raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28, PCI_IMAP2);
 
 
-void __init pci_versatile_postinit(void)
-{
-}
+	__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP0);
+	__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP1);
+	__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP2);
 
 
+	__raw_writel(1, SYS_PCICTL);
+}
 
 
 /*
 /*
  * map the specified device/slot/pin to an IRQ.   Different backplanes may need to modify this.
  * map the specified device/slot/pin to an IRQ.   Different backplanes may need to modify this.
@@ -326,16 +331,15 @@ static int __init versatile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 	int irq;
 	int devslot = PCI_SLOT(dev->devfn);
 	int devslot = PCI_SLOT(dev->devfn);
 
 
-	/* slot,  pin,  irq
-	    24	  1	27
-	    25    1	28	untested
-	    26	  1	29
-	    27    1	30	untested
-	*/
-
-	irq = 27 + ((slot + pin + 2) % 3);	/* Fudged */
+	/* slot,  pin,	irq
+	 *  24     1     27
+	 *  25     1     28
+	 *  26     1     29
+	 *  27     1     30
+	 */
+	irq = 27 + ((slot + pin - 1) & 3);
 
 
-	printk("map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
+	printk("PCI map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
 
 
 	return irq;
 	return irq;
 }
 }
@@ -347,7 +351,6 @@ static struct hw_pci versatile_pci __initdata = {
 	.setup			= pci_versatile_setup,
 	.setup			= pci_versatile_setup,
 	.scan			= pci_versatile_scan_bus,
 	.scan			= pci_versatile_scan_bus,
 	.preinit		= pci_versatile_preinit,
 	.preinit		= pci_versatile_preinit,
-	.postinit		= pci_versatile_postinit,
 };
 };
 
 
 static int __init versatile_pci_init(void)
 static int __init versatile_pci_init(void)

+ 1 - 1
arch/arm/mm/abort-ev6.S

@@ -20,7 +20,7 @@
  */
  */
 	.align	5
 	.align	5
 ENTRY(v6_early_abort)
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_MPCORE
+#ifdef CONFIG_CPU_32v6K
 	clrex
 	clrex
 #else
 #else
 	strex	r0, r1, [sp]			@ Clear the exclusive monitor
 	strex	r0, r1, [sp]			@ Clear the exclusive monitor

+ 4 - 3
arch/arm/mm/cache-v6.S

@@ -96,15 +96,16 @@ ENTRY(v6_coherent_user_range)
 #ifdef HARVARD_CACHE
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
-	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
 	add	r0, r0, #CACHE_LINE_SIZE
 	add	r0, r0, #CACHE_LINE_SIZE
 	cmp	r0, r1
 	cmp	r0, r1
 	blo	1b
 	blo	1b
 #endif
 #endif
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#ifdef HARVARD_CACHE
 	mov	r0, #0
 	mov	r0, #0
+#ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
+#else
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
 #endif
 #endif
 	mov	pc, lr
 	mov	pc, lr
 
 

+ 4 - 2
arch/arm/mm/flush.c

@@ -24,14 +24,16 @@
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
 {
 	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+	const int zero = 0;
 
 
 	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
 	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
 	flush_tlb_kernel_page(to);
 	flush_tlb_kernel_page(to);
 
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
-	"	mcrr	p15, 0, %1, %0, c5\n"
+	"	mcr	p15, 0, %2, c7, c10, 4\n"
+	"	mcr	p15, 0, %2, c7, c5, 0\n"
 	    :
 	    :
-	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
 	    : "cc");
 }
 }
 
 

+ 1 - 0
arch/arm/mm/tlb-v6.S

@@ -80,6 +80,7 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	cmp	r0, r1
 	blo	1b
 	blo	1b
+	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
 	mov	pc, lr
 	mov	pc, lr
 
 
 	.section ".text.init", #alloc, #execinstr
 	.section ".text.init", #alloc, #execinstr

+ 0 - 1
arch/arm/plat-omap/pm.c

@@ -38,7 +38,6 @@
 #include <linux/pm.h>
 #include <linux/pm.h>
 #include <linux/sched.h>
 #include <linux/sched.h>
 #include <linux/proc_fs.h>
 #include <linux/proc_fs.h>
-#include <linux/pm.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 
 
 #include <asm/io.h>
 #include <asm/io.h>

+ 33 - 2
arch/arm/tools/mach-types

@@ -12,7 +12,7 @@
 #
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
 #
-# Last update: Mon Jan 9 12:56:42 2006
+# Last update: Mon Feb 20 10:18:02 2006
 #
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
 #
@@ -904,7 +904,7 @@ wg302v2			MACH_WG302V2		WG302V2			890
 eb42x			MACH_EB42X		EB42X			891
 eb42x			MACH_EB42X		EB42X			891
 iq331es			MACH_IQ331ES		IQ331ES			892
 iq331es			MACH_IQ331ES		IQ331ES			892
 cosydsp			MACH_COSYDSP		COSYDSP			893
 cosydsp			MACH_COSYDSP		COSYDSP			893
-uplat7d			MACH_UPLAT7D		UPLAT7D			894
+uplat7d_proto		MACH_UPLAT7D		UPLAT7D			894
 ptdavinci		MACH_PTDAVINCI		PTDAVINCI		895
 ptdavinci		MACH_PTDAVINCI		PTDAVINCI		895
 mbus			MACH_MBUS		MBUS			896
 mbus			MACH_MBUS		MBUS			896
 nadia2vb		MACH_NADIA2VB		NADIA2VB		897
 nadia2vb		MACH_NADIA2VB		NADIA2VB		897
@@ -938,3 +938,34 @@ auckland		MACH_AUCKLAND		AUCKLAND		924
 ak3220m			MACH_AK3320M		AK3320M			925
 ak3220m			MACH_AK3320M		AK3320M			925
 duramax			MACH_DURAMAX		DURAMAX			926
 duramax			MACH_DURAMAX		DURAMAX			926
 n35			MACH_N35		N35			927
 n35			MACH_N35		N35			927
+pronghorn		MACH_PRONGHORN		PRONGHORN		928
+fundy			MACH_FUNDY		FUNDY			929
+logicpd_pxa270		MACH_LOGICPD_PXA270	LOGICPD_PXA270		930
+cpu777			MACH_CPU777		CPU777			931
+simicon9201		MACH_SIMICON9201	SIMICON9201		932
+leap2_hpm		MACH_LEAP2_HPM		LEAP2_HPM		933
+cm922txa10		MACH_CM922TXA10		CM922TXA10		934
+sandgate		MACH_PXA		PXA			935
+sandgate2		MACH_SANDGATE2		SANDGATE2		936
+sandgate2g		MACH_SANDGATE2G		SANDGATE2G		937
+sandgate2p		MACH_SANDGATE2P		SANDGATE2P		938
+fred_jack		MACH_FRED_JACK		FRED_JACK		939
+ttg_color1		MACH_TTG_COLOR1		TTG_COLOR1		940
+nxeb500hmi		MACH_NXEB500HMI		NXEB500HMI		941
+netdcu8			MACH_NETDCU8		NETDCU8			942
+ml675050_cpu_boa	MACH_ML675050_CPU_BOA	ML675050_CPU_BOA	943
+ng_fvx538		MACH_NG_FVX538		NG_FVX538		944
+ng_fvs338		MACH_NG_FVS338		NG_FVS338		945
+pnx4103			MACH_PNX4103		PNX4103			946
+hesdb			MACH_HESDB		HESDB			947
+xsilo			MACH_XSILO		XSILO			948
+espresso		MACH_ESPRESSO		ESPRESSO		949
+emlc			MACH_EMLC		EMLC			950
+sisteron		MACH_SISTERON		SISTERON		951
+rx1950			MACH_RX1950		RX1950			952
+tsc_venus		MACH_TSC_VENUS		TSC_VENUS		953
+ds101j			MACH_DS101J		DS101J			954
+mxc300_30ads		MACH_MXC30030ADS	MXC30030ADS		955
+fujitsu_wimaxsoc	MACH_FUJITSU_WIMAXSOC	FUJITSU_WIMAXSOC	956
+dualpcmodem		MACH_DUALPCMODEM	DUALPCMODEM		957
+gesbc9312		MACH_GESBC9312		GESBC9312		958

+ 4 - 0
arch/frv/Kconfig

@@ -25,6 +25,10 @@ config GENERIC_HARDIRQS
 	bool
 	bool
 	default n
 	default n
 
 
+config TIME_LOW_RES
+	bool
+	default y
+
 mainmenu "Fujitsu FR-V Kernel Configuration"
 mainmenu "Fujitsu FR-V Kernel Configuration"
 
 
 source "init/Kconfig"
 source "init/Kconfig"

+ 1 - 1
arch/frv/Makefile

@@ -81,7 +81,7 @@ endif
 # - reserve CC3 for use with atomic ops
 # - reserve CC3 for use with atomic ops
 # - all the extra registers are dealt with only at context switch time
 # - all the extra registers are dealt with only at context switch time
 CFLAGS		+= -mno-fdpic -mgpr-32 -msoft-float -mno-media
 CFLAGS		+= -mno-fdpic -mgpr-32 -msoft-float -mno-media
-CFLAGS		+= -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15
+CFLAGS		+= -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2
 AFLAGS		+= -mno-fdpic
 AFLAGS		+= -mno-fdpic
 ASFLAGS		+= -mno-fdpic
 ASFLAGS		+= -mno-fdpic
 
 

+ 73 - 4
arch/frv/kernel/break.S

@@ -200,12 +200,20 @@ __break_step:
 	movsg		bpcsr,gr2
 	movsg		bpcsr,gr2
 	sethi.p		%hi(__entry_kernel_external_interrupt),gr3
 	sethi.p		%hi(__entry_kernel_external_interrupt),gr3
 	setlo		%lo(__entry_kernel_external_interrupt),gr3
 	setlo		%lo(__entry_kernel_external_interrupt),gr3
-	subcc		gr2,gr3,gr0,icc0
+	subcc.p		gr2,gr3,gr0,icc0
+	sethi		%hi(__entry_uspace_external_interrupt),gr3
+	setlo.p		%lo(__entry_uspace_external_interrupt),gr3
 	beq		icc0,#2,__break_step_kernel_external_interrupt
 	beq		icc0,#2,__break_step_kernel_external_interrupt
-	sethi.p		%hi(__entry_uspace_external_interrupt),gr3
-	setlo		%lo(__entry_uspace_external_interrupt),gr3
-	subcc		gr2,gr3,gr0,icc0
+	subcc.p		gr2,gr3,gr0,icc0
+	sethi		%hi(__entry_kernel_external_interrupt_virtually_disabled),gr3
+	setlo.p		%lo(__entry_kernel_external_interrupt_virtually_disabled),gr3
 	beq		icc0,#2,__break_step_uspace_external_interrupt
 	beq		icc0,#2,__break_step_uspace_external_interrupt
+	subcc.p		gr2,gr3,gr0,icc0
+	sethi		%hi(__entry_kernel_external_interrupt_virtual_reenable),gr3
+	setlo.p		%lo(__entry_kernel_external_interrupt_virtual_reenable),gr3
+	beq		icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled
+	subcc		gr2,gr3,gr0,icc0
+	beq		icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable
 
 
 	LEDS		0x2007,gr2
 	LEDS		0x2007,gr2
 
 
@@ -254,6 +262,9 @@ __break_step_kernel_softprog_interrupt:
 # step through an external interrupt from kernel mode
 # step through an external interrupt from kernel mode
 	.globl		__break_step_kernel_external_interrupt
 	.globl		__break_step_kernel_external_interrupt
 __break_step_kernel_external_interrupt:
 __break_step_kernel_external_interrupt:
+	# deal with virtual interrupt disablement
+	beq		icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled
+
 	sethi.p		%hi(__entry_kernel_external_interrupt_reentry),gr3
 	sethi.p		%hi(__entry_kernel_external_interrupt_reentry),gr3
 	setlo		%lo(__entry_kernel_external_interrupt_reentry),gr3
 	setlo		%lo(__entry_kernel_external_interrupt_reentry),gr3
 
 
@@ -294,6 +305,64 @@ __break_return_as_kernel_prologue:
 #endif
 #endif
 	rett		#1
 	rett		#1
 
 
+# we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled
+# need to really disable interrupts, set flag, fix up and return
+__break_step_kernel_external_interrupt_virtually_disabled:
+	movsg		psr,gr2
+	andi		gr2,#~PSR_PIL,gr2
+	ori		gr2,#PSR_PIL_14,gr2	/* debugging interrupts only */
+	movgs		gr2,psr
+
+	ldi		@(gr31,#REG_CCR),gr3
+	movgs		gr3,ccr
+	subcc.p		gr0,gr0,gr0,icc2	/* leave Z set, clear C */
+
+	# exceptions must've been enabled and we must've been in supervisor mode
+	setlos		BPSR_BET|BPSR_BS,gr3
+	movgs		gr3,bpsr
+
+	# return to where the interrupt happened
+	movsg		pcsr,gr2
+	movgs		gr2,bpcsr
+
+	lddi.p		@(gr31,#REG_GR(2)),gr2
+
+	xor		gr31,gr31,gr31
+	movgs		gr0,brr
+#ifdef CONFIG_MMU
+	movsg		scr3,gr31
+#endif
+	rett		#1
+
+# we stepped through into the virtual interrupt reenablement trap
+#
+# we also want to single step anyway, but after fixing up so that we get an event on the
+# instruction after the broken-into exception returns
+	.globl		__break_step_kernel_external_interrupt_virtual_reenable
+__break_step_kernel_external_interrupt_virtual_reenable:
+	movsg		psr,gr2
+	andi		gr2,#~PSR_PIL,gr2
+	movgs		gr2,psr
+
+	ldi		@(gr31,#REG_CCR),gr3
+	movgs		gr3,ccr
+	subicc		gr0,#1,gr0,icc2		/* clear Z, set C */
+
+	# save the adjusted ICC2
+	movsg		ccr,gr3
+	sti		gr3,@(gr31,#REG_CCR)
+
+	# exceptions must've been enabled and we must've been in supervisor mode
+	setlos		BPSR_BET|BPSR_BS,gr3
+	movgs		gr3,bpsr
+
+	# return to where the trap happened
+	movsg		pcsr,gr2
+	movgs		gr2,bpcsr
+
+	# and then process the single step
+	bra		__break_continue
+
 # step through an internal exception from uspace mode
 # step through an internal exception from uspace mode
 	.globl		__break_step_uspace_softprog_interrupt
 	.globl		__break_step_uspace_softprog_interrupt
 __break_step_uspace_softprog_interrupt:
 __break_step_uspace_softprog_interrupt:

+ 34 - 5
arch/frv/kernel/entry-table.S

@@ -116,6 +116,8 @@ __break_kerneltrap_fixup_table:
 	.long		__break_step_uspace_external_interrupt
 	.long		__break_step_uspace_external_interrupt
 	.section .trap.kernel
 	.section .trap.kernel
 	.org		\tbr_tt
 	.org		\tbr_tt
+	# deal with virtual interrupt disablement
+	beq		icc2,#0,__entry_kernel_external_interrupt_virtually_disabled
 	bra		__entry_kernel_external_interrupt
 	bra		__entry_kernel_external_interrupt
 	.section .trap.fixup.kernel
 	.section .trap.fixup.kernel
 	.org		\tbr_tt >> 2
 	.org		\tbr_tt >> 2
@@ -259,25 +261,52 @@ __trap_fixup_kernel_data_tlb_miss:
 	.org		TBR_TT_TRAP0
 	.org		TBR_TT_TRAP0
 	.rept		127
 	.rept		127
 	bra		__entry_uspace_softprog_interrupt
 	bra		__entry_uspace_softprog_interrupt
-	bra		__break_step_uspace_softprog_interrupt
-	.long		0,0
+	.long		0,0,0
 	.endr
 	.endr
 	.org		TBR_TT_BREAK
 	.org		TBR_TT_BREAK
 	bra		__entry_break
 	bra		__entry_break
 	.long		0,0,0
 	.long		0,0,0
 
 
+	.section	.trap.fixup.user
+	.org		TBR_TT_TRAP0 >> 2
+	.rept		127
+	.long		__break_step_uspace_softprog_interrupt
+	.endr
+	.org		TBR_TT_BREAK >> 2
+	.long		0
+
 	# miscellaneous kernel mode entry points
 	# miscellaneous kernel mode entry points
 	.section	.trap.kernel
 	.section	.trap.kernel
 	.org		TBR_TT_TRAP0
 	.org		TBR_TT_TRAP0
-	.rept		127
 	bra		__entry_kernel_softprog_interrupt
 	bra		__entry_kernel_softprog_interrupt
-	bra		__break_step_kernel_softprog_interrupt
-	.long		0,0
+	.org		TBR_TT_TRAP1
+	bra		__entry_kernel_softprog_interrupt
+
+	# trap #2 in kernel - reenable interrupts
+	.org		TBR_TT_TRAP2
+	bra		__entry_kernel_external_interrupt_virtual_reenable
+
+	# miscellaneous kernel traps
+	.org		TBR_TT_TRAP3
+	.rept		124
+	bra		__entry_kernel_softprog_interrupt
+	.long		0,0,0
 	.endr
 	.endr
 	.org		TBR_TT_BREAK
 	.org		TBR_TT_BREAK
 	bra		__entry_break
 	bra		__entry_break
 	.long		0,0,0
 	.long		0,0,0
 
 
+	.section	.trap.fixup.kernel
+	.org		TBR_TT_TRAP0 >> 2
+	.long		__break_step_kernel_softprog_interrupt
+	.long		__break_step_kernel_softprog_interrupt
+	.long		__break_step_kernel_external_interrupt_virtual_reenable
+	.rept		124
+	.long		__break_step_kernel_softprog_interrupt
+	.endr
+	.org		TBR_TT_BREAK >> 2
+	.long		0
+
 	# miscellaneous debug mode entry points
 	# miscellaneous debug mode entry points
 	.section	.trap.break
 	.section	.trap.break
 	.org		TBR_TT_BREAK
 	.org		TBR_TT_BREAK

+ 79 - 12
arch/frv/kernel/entry.S

@@ -141,7 +141,10 @@ __entry_uspace_external_interrupt_reentry:
 
 
 	movsg		gner0,gr4
 	movsg		gner0,gr4
 	movsg		gner1,gr5
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# interrupts start off fully disabled in the interrupt handler
+	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */
 
 
 	# set up kernel global registers
 	# set up kernel global registers
 	sethi.p		%hi(__kernel_current_task),gr5
 	sethi.p		%hi(__kernel_current_task),gr5
@@ -193,9 +196,8 @@ __entry_uspace_external_interrupt_reentry:
         .type		__entry_kernel_external_interrupt,@function
         .type		__entry_kernel_external_interrupt,@function
 __entry_kernel_external_interrupt:
 __entry_kernel_external_interrupt:
 	LEDS		0x6210
 	LEDS		0x6210
-
-	sub		sp,gr15,gr31
-	LEDS32
+//	sub		sp,gr15,gr31
+//	LEDS32
 
 
 	# set up the stack pointer
 	# set up the stack pointer
 	or.p		sp,gr0,gr30
 	or.p		sp,gr0,gr30
@@ -231,7 +233,10 @@ __entry_kernel_external_interrupt_reentry:
 	stdi		gr24,@(gr28,#REG_GR(24))
 	stdi		gr24,@(gr28,#REG_GR(24))
 	stdi		gr26,@(gr28,#REG_GR(26))
 	stdi		gr26,@(gr28,#REG_GR(26))
 	sti		gr29,@(gr28,#REG_GR(29))
 	sti		gr29,@(gr28,#REG_GR(29))
-	stdi		gr30,@(gr28,#REG_GR(30))
+	stdi.p		gr30,@(gr28,#REG_GR(30))
+
+	# note virtual interrupts will be fully enabled upon return
+	subicc		gr0,#1,gr0,icc2			/* clear Z, set C */
 
 
 	movsg		tbr ,gr20
 	movsg		tbr ,gr20
 	movsg		psr ,gr22
 	movsg		psr ,gr22
@@ -267,7 +272,10 @@ __entry_kernel_external_interrupt_reentry:
 
 
 	movsg		gner0,gr4
 	movsg		gner0,gr4
 	movsg		gner1,gr5
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# interrupts start off fully disabled in the interrupt handler
+	subcc		gr0,gr0,gr0,icc2			/* set Z and clear C */
 
 
 	# set the return address
 	# set the return address
 	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
 	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
@@ -291,6 +299,45 @@ __entry_kernel_external_interrupt_reentry:
 
 
 	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
 	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
 
 
+###############################################################################
+#
+# deal with interrupts that were actually virtually disabled
+# - we need to really disable them, flag the fact and return immediately
+# - if you change this, you must alter break.S also
+#
+###############################################################################
+	.balign		L1_CACHE_BYTES
+	.globl		__entry_kernel_external_interrupt_virtually_disabled
+	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
+__entry_kernel_external_interrupt_virtually_disabled:
+	movsg		psr,gr30
+	andi		gr30,#~PSR_PIL,gr30
+	ori		gr30,#PSR_PIL_14,gr30		; debugging interrupts only
+	movgs		gr30,psr
+	subcc		gr0,gr0,gr0,icc2		; leave Z set, clear C
+	rett		#0
+
+	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
+
+###############################################################################
+#
+# deal with re-enablement of interrupts that were pending when virtually re-enabled
+# - set ICC2.C, re-enable the real interrupts and return
+# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
+# - if you change this, you must alter break.S also
+#
+###############################################################################
+	.balign		L1_CACHE_BYTES
+	.globl		__entry_kernel_external_interrupt_virtual_reenable
+	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
+__entry_kernel_external_interrupt_virtual_reenable:
+	movsg		psr,gr30
+	andi		gr30,#~PSR_PIL,gr30		; re-enable interrupts
+	movgs		gr30,psr
+	subicc		gr0,#1,gr0,icc2			; clear Z, set C
+	rett		#0
+
+	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
 
 
 ###############################################################################
 ###############################################################################
 #
 #
@@ -335,6 +382,7 @@ __entry_uspace_softprog_interrupt_reentry:
 
 
 	sethi.p		%hi(__entry_return_from_user_exception),gr23
 	sethi.p		%hi(__entry_return_from_user_exception),gr23
 	setlo		%lo(__entry_return_from_user_exception),gr23
 	setlo		%lo(__entry_return_from_user_exception),gr23
+
 	bra		__entry_common
 	bra		__entry_common
 
 
 	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
 	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
@@ -495,7 +543,10 @@ __entry_common:
 
 
 	movsg		gner0,gr4
 	movsg		gner0,gr4
 	movsg		gner1,gr5
 	movsg		gner1,gr5
-	stdi		gr4,@(gr28,#REG_GNER0)
+	stdi.p		gr4,@(gr28,#REG_GNER0)
+
+	# set up virtual interrupt disablement
+	subicc		gr0,#1,gr0,icc2			/* clear Z flag, set C flag */
 
 
 	# set up kernel global registers
 	# set up kernel global registers
 	sethi.p		%hi(__kernel_current_task),gr5
 	sethi.p		%hi(__kernel_current_task),gr5
@@ -1418,11 +1469,27 @@ sys_call_table:
 	.long sys_add_key
 	.long sys_add_key
 	.long sys_request_key
 	.long sys_request_key
 	.long sys_keyctl
 	.long sys_keyctl
-	.long sys_ni_syscall // sys_vperfctr_open
-	.long sys_ni_syscall // sys_vperfctr_control	/* 290 */
-	.long sys_ni_syscall // sys_vperfctr_unlink
-	.long sys_ni_syscall // sys_vperfctr_iresume
-	.long sys_ni_syscall // sys_vperfctr_read
+	.long sys_ioprio_set
+	.long sys_ioprio_get		/* 290 */
+	.long sys_inotify_init
+	.long sys_inotify_add_watch
+	.long sys_inotify_rm_watch
+	.long sys_migrate_pages
+	.long sys_openat		/* 295 */
+	.long sys_mkdirat
+	.long sys_mknodat
+	.long sys_fchownat
+	.long sys_futimesat
+	.long sys_newfstatat		/* 300 */
+	.long sys_unlinkat
+	.long sys_renameat
+	.long sys_linkat
+	.long sys_symlinkat
+	.long sys_readlinkat		/* 305 */
+	.long sys_fchmodat
+	.long sys_faccessat
+	.long sys_pselect6
+	.long sys_ppoll
 
 
 
 
 syscall_table_size = (. - sys_call_table)
 syscall_table_size = (. - sys_call_table)

+ 3 - 0
arch/frv/kernel/head.S

@@ -513,6 +513,9 @@ __head_mmu_enabled:
 	movgs		gr0,ccr
 	movgs		gr0,ccr
 	movgs		gr0,cccr
 	movgs		gr0,cccr
 
 
+	# initialise the virtual interrupt handling
+	subcc		gr0,gr0,gr0,icc2		/* set Z, clear C */
+
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
 	movgs		gr3,scr2
 	movgs		gr3,scr2
 	movgs		gr3,scr3
 	movgs		gr3,scr3

+ 3 - 38
arch/frv/kernel/irq.c

@@ -287,18 +287,11 @@ asmlinkage void do_IRQ(void)
 	struct irq_source *source;
 	struct irq_source *source;
 	int level, cpu;
 	int level, cpu;
 
 
+	irq_enter();
+
 	level = (__frame->tbr >> 4) & 0xf;
 	cpu = smp_processor_id();

-#if 0
-	{
-		static u32 irqcount;
-		*(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
-		*(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
-		mb();
-	}
-#endif
-
 	if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
 		BUG();

@@ -308,40 +301,12 @@ asmlinkage void do_IRQ(void)

 	kstat_this_cpu.irqs[level]++;

-	irq_enter();
-
 	for (source = frv_irq_levels[level].sources; source; source = source->next)
 		source->doirq(source);

-	irq_exit();
-
 	__clr_MASK(level);

-	/* only process softirqs if we didn't interrupt another interrupt handler */
-	if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
-		if (local_softirq_pending())
-			do_softirq();
-
-#ifdef CONFIG_PREEMPT
-	local_irq_disable();
-	while (--current->preempt_count == 0) {
-		if (!(__frame->psr & PSR_S) ||
-		    current->need_resched == 0 ||
-		    in_interrupt())
-			break;
-		current->preempt_count++;
-		local_irq_enable();
-		preempt_schedule();
-		local_irq_disable();
-	}
-#endif
-
-#if 0
-	{
-		*(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
-		mb();
-	}
-#endif
+	irq_exit();

 } /* end do_IRQ() */


+ 0 - 9
arch/frv/mm/kmap.c

@@ -43,15 +43,6 @@ void iounmap(void *addr)
 {
 }

-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
 /*
  * Set new cache mode for some kernel address space.
  * The caller must push data for that range itself, if such data may already

+ 4 - 0
arch/h8300/Kconfig

@@ -33,6 +33,10 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y

+config TIME_LOW_RES
+	bool
+	default y
+
 config ISA
 	bool
 	default y

+ 1 - 1
arch/h8300/Kconfig.cpu

@@ -169,7 +169,7 @@ endif

 config CPU_H8300H
 	bool
-	depends on (H8002 || H83007 || H83048 || H83068)
+	depends on (H83002 || H83007 || H83048 || H83068)
 	default y

 config CPU_H8S

+ 1 - 1
arch/h8300/Kconfig.debug

@@ -34,7 +34,7 @@ config GDB_DEBUG
 	help
 	  gdb stub exception support

-config CONFIG_SH_STANDARD_BIOS
+config SH_STANDARD_BIOS
 	bool "Use gdb protocol serial console"
 	bool "Use gdb protocol serial console"
 	depends on (!H8300H_SIM && !H8S_SIM)
 	depends on (!H8300H_SIM && !H8S_SIM)
 	help
 	help

+ 1 - 1
arch/h8300/defconfig

@@ -328,7 +328,7 @@ CONFIG_FULLDEBUG=y
 CONFIG_NO_KERNEL_MSG=y
 # CONFIG_SYSCALL_PRINT is not set
 # CONFIG_GDB_DEBUG is not set
-# CONFIG_CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_SH_STANDARD_BIOS is not set
 # CONFIG_DEFAULT_CMDLINE is not set
 # CONFIG_BLKDEV_RESERVE is not set


+ 3 - 0
arch/h8300/kernel/process.c

@@ -45,6 +45,9 @@
 #include <asm/setup.h>
 #include <asm/pgtable.h>

+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
 asmlinkage void ret_from_fork(void);

 /*

+ 2 - 1
arch/i386/Kconfig

@@ -733,7 +733,7 @@ config PHYSICAL_START

 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-	depends on SMP && HOTPLUG && EXPERIMENTAL
+	depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
 	---help---
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
@@ -1060,6 +1060,7 @@ source "arch/i386/oprofile/Kconfig"

 config KPROBES
 	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes

+ 3 - 0
arch/i386/boot/.gitignore

@@ -0,0 +1,3 @@
+bootsect
+bzImage
+setup

+ 1 - 0
arch/i386/boot/tools/.gitignore

@@ -0,0 +1 @@
+build

+ 1 - 0
arch/i386/kernel/.gitignore

@@ -0,0 +1 @@
+vsyscall.lds

+ 2 - 2
arch/i386/kernel/Makefile

@@ -7,11 +7,11 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
-		quirks.o i8237.o
+		quirks.o i8237.o topology.o

 obj-y				+= cpu/
 obj-y				+= timers/
-obj-$(CONFIG_ACPI)		+= acpi/
+obj-y				+= acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
 obj-$(CONFIG_MCA)		+= mca.o
 obj-$(CONFIG_X86_MSR)		+= msr.o

+ 1 - 1
arch/i386/kernel/acpi/Makefile

@@ -1,4 +1,4 @@
-obj-y				:= boot.o
+obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_X86_IO_APIC)	+= earlyquirk.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup.o


+ 0 - 3
arch/i386/kernel/acpi/boot.c

@@ -1111,9 +1111,6 @@ int __init acpi_boot_table_init(void)
 		disable_acpi();
 		return error;
 	}
-#ifdef __i386__
-	check_acpi_pci();
-#endif

 	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);


+ 8 - 0
arch/i386/kernel/acpi/earlyquirk.c

@@ -7,14 +7,22 @@
 #include <linux/pci.h>
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
+#include <asm/apic.h>

 static int __init check_bridge(int vendor, int device)
 {
+#ifdef CONFIG_ACPI
 	/* According to Nvidia all timer overrides are bogus. Just ignore
 	   them all. */
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
 		acpi_skip_timer_override = 1;
 	}
+#endif
+	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
+		timer_over_8254 = 0;
+		printk(KERN_INFO "ATI board detected. Disabling timer routing "
+				"over 8254.\n");
+	}
 	return 0;
 }


+ 4 - 2
arch/i386/kernel/apic.c

@@ -570,16 +570,18 @@ void __devinit setup_local_APIC(void)
  */
 void lapic_shutdown(void)
 {
+	unsigned long flags;
+
 	if (!cpu_has_apic)
 		return;

-	local_irq_disable();
+	local_irq_save(flags);
 	clear_local_APIC();

 	if (enabled_via_apicbase)
 		disable_local_APIC();

-	local_irq_enable();
+	local_irq_restore(flags);
 }

 #ifdef CONFIG_PM

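The apic.c change above swaps local_irq_disable()/local_irq_enable() for local_irq_save()/local_irq_restore(), so lapic_shutdown() no longer turns interrupts back on when its caller already had them off. A minimal sketch of the pattern (kernel context assumed; the function name is hypothetical):

static void shutdown_quietly(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* remember the caller's IRQ state */
	/* ... poke hardware that must not be interrupted ... */
	local_irq_restore(flags);	/* put back exactly what the caller had */
}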
+ 30 - 6
arch/i386/kernel/cpu/common.c

@@ -4,6 +4,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/bootmem.h>
 #include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -18,6 +19,9 @@

 #include "cpu.h"

+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
+
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

@@ -274,10 +278,10 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
 			c->x86_capability[4] = excap;
 			c->x86 = (tfms >> 8) & 15;
 			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf) {
+			if (c->x86 == 0xf)
 				c->x86 += (tfms >> 20) & 0xff;
+			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			} 
 			c->x86_mask = tfms & 15;
 		} else {
 			/* Have CPUID level 0 only - unheard of */
@@ -571,8 +575,9 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+	struct desc_struct *gdt;
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

 	if (cpu_test_and_set(cpu, cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -589,6 +594,25 @@ void __devinit cpu_init(void)
 		set_in_cr4(X86_CR4_TSD);
 	}

+	/*
+	 * This is a horrible hack to allocate the GDT.  The problem
+	 * is that cpu_init() is called really early for the boot CPU
+	 * (and hence needs bootmem) but much later for the secondary
+	 * CPUs, when bootmem will have gone away
+	 */
+	if (NODE_DATA(0)->bdata->node_bootmem_map) {
+		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+		/* alloc_bootmem_pages panics on failure, so no check */
+		memset(gdt, 0, PAGE_SIZE);
+	} else {
+		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+		if (unlikely(!gdt)) {
+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+			for (;;)
+				local_irq_enable();
+		}
+	}
+
 	/*
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
@@ -601,10 +625,10 @@ void __devinit cpu_init(void)
 		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
 		(CPU_16BIT_STACK_SIZE - 1);

-	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
- 	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
+	cpu_gdt_descr->size = GDT_SIZE - 1;
+ 	cpu_gdt_descr->address = (unsigned long)gdt;
-	load_gdt(&cpu_gdt_descr[cpu]);
+	load_gdt(cpu_gdt_descr);
 	load_idt(&idt_descr);

 	/*

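The cpu/common.c hunk above replaces the old static cpu_gdt_descr[NR_CPUS] array with a DEFINE_PER_CPU() descriptor plus a GDT page allocated at cpu_init() time (from bootmem for the boot CPU, get_zeroed_page() later). A short sketch of the per-CPU variable API being relied on — the type and names below are made up for illustration:

#include <linux/percpu.h>

struct demo_descr {
	unsigned long	address;
	unsigned short	size;
};

DEFINE_PER_CPU(struct demo_descr, demo_descr);	/* one copy per possible CPU */

static void demo_point_at(void *table, unsigned int size, int cpu)
{
	struct demo_descr *d = &per_cpu(demo_descr, cpu);	/* that CPU's copy */

	d->address = (unsigned long)table;
	d->size    = size - 1;
}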
+ 1 - 0
arch/i386/kernel/cpu/transmeta.c

@@ -1,4 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/msr.h>

+ 9 - 5
arch/i386/kernel/efi.c

@@ -70,10 +70,13 @@ static void efi_call_phys_prelog(void)
 {
 	unsigned long cr4;
 	unsigned long temp;
+	struct Xgt_desc_struct *cpu_gdt_descr;

 	spin_lock(&efi_rt_lock);
 	local_irq_save(efi_rt_eflags);
+	cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
+
 	/*
 	 * If I don't have PSE, I should just duplicate two entries in page
 	 * directory. If I have PSE, I just need to duplicate one entry in
@@ -103,17 +106,18 @@ static void efi_call_phys_prelog(void)
 	 */
 	local_flush_tlb();

-	cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address);
-	load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0]));
+	cpu_gdt_descr->address = __pa(cpu_gdt_descr->address);
+	load_gdt(cpu_gdt_descr);
 }

 static void efi_call_phys_epilog(void)
 {
 	unsigned long cr4;
 	unsigned long cr4;
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
+
+	cpu_gdt_descr->address = __va(cpu_gdt_descr->address);
+	load_gdt(cpu_gdt_descr);
 
 
-	cpu_gdt_descr[0].address =
-		(unsigned long) __va(cpu_gdt_descr[0].address);
-	load_gdt(&cpu_gdt_descr[0]);
 	cr4 = read_cr4();
 
 
 	if (cr4 & X86_CR4_PSE) {

+ 4 - 2
arch/i386/kernel/head.S

@@ -398,7 +398,11 @@ ignore_int:
 	pushl 32(%esp)
 	pushl 40(%esp)
 	pushl $int_msg
+#ifdef CONFIG_EARLY_PRINTK
+	call early_printk
+#else
 	call printk
+#endif
 	addl $(5*4),%esp
 	popl %ds
 	popl %es
@@ -530,5 +534,3 @@ ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* 0xf0 - unused */
 	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */

-	/* Be sure this is zeroed to avoid false validations in Xen */
-	.fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0

+ 0 - 2
arch/i386/kernel/i386_ksyms.c

@@ -3,8 +3,6 @@
 #include <asm/checksum.h>
 #include <asm/desc.h>

-EXPORT_SYMBOL_GPL(cpu_gdt_descr);
-
 EXPORT_SYMBOL(__down_failed);
 EXPORT_SYMBOL(__down_failed_interruptible);
 EXPORT_SYMBOL(__down_failed_trylock);

+ 22 - 3
arch/i386/kernel/io_apic.c

@@ -51,6 +51,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

 static DEFINE_SPINLOCK(ioapic_lock);

+int timer_over_8254 __initdata = 1;
+
 /*
  *	Is the SiS APIC rmw bug present ?
  *	-1 = don't know, 0 = no, 1 = yes
@@ -2267,7 +2269,8 @@ static inline void check_timer(void)
 	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	init_8259A(1);
 	timer_ack = 1;
-	enable_8259A_irq(0);
+	if (timer_over_8254 > 0)
+		enable_8259A_irq(0);

 	pin1  = find_isa_irq_pin(0, mp_INT);
 	apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2392,6 +2395,20 @@ void __init setup_IO_APIC(void)
 		print_IO_APIC();
 }

+static int __init setup_disable_8254_timer(char *s)
+{
+	timer_over_8254 = -1;
+	return 1;
+}
+static int __init setup_enable_8254_timer(char *s)
+{
+	timer_over_8254 = 2;
+	return 1;
+}
+
+__setup("disable_8254_timer", setup_disable_8254_timer);
+__setup("enable_8254_timer", setup_enable_8254_timer);
+
 /*
  *	Called after all the initialization is done. If we didnt find any
  *	APIC bugs then we can allow the modify fast path
@@ -2566,8 +2583,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
 		spin_unlock_irqrestore(&ioapic_lock, flags);

 		/* Sanity check */
-		if (reg_00.bits.ID != apic_id)
-			panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic);
+		if (reg_00.bits.ID != apic_id) {
+			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+			return -1;
+		}
 	}

 	apic_printk(APIC_VERBOSE, KERN_INFO

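The io_apic.c addition above wires the new disable_8254_timer/enable_8254_timer boot options through __setup(). The general shape of such a boot-time flag, with a made-up option name:

#include <linux/init.h>

static int demo_feature __initdata = 1;

static int __init demo_feature_off(char *str)
{
	demo_feature = 0;
	return 1;			/* non-zero: the option was consumed */
}
__setup("demo_feature_off", demo_feature_off);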
+ 14 - 2
arch/i386/kernel/kprobes.c

@@ -58,6 +58,11 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)

 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
+	/* insn: must be on special executable page on i386. */
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn)
+		return -ENOMEM;
+
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	p->opcode = *p->addr;
 	return 0;
@@ -77,6 +82,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }

+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	down(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	up(&kprobe_mutex);
+}
+
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
@@ -111,7 +123,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
 		regs->eip = (unsigned long)p->addr;
 	else
-		regs->eip = (unsigned long)&p->ainsn.insn;
+		regs->eip = (unsigned long)p->ainsn.insn;
 }

 /* Called with kretprobe_lock held */
@@ -351,7 +363,7 @@ static void __kprobes resume_execution(struct kprobe *p,
 {
 	unsigned long *tos = (unsigned long *)&regs->esp;
 	unsigned long next_eip = 0;
-	unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
+	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
 	unsigned long orig_eip = (unsigned long)p->addr;

 	switch (p->ainsn.insn[0]) {

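The kprobes.c change above stops single-stepping out of a buffer embedded in struct kprobe and instead copies the probed instruction into a slot from get_insn_slot() (a page guaranteed to be executable), freeing it again in the new arch_remove_kprobe(). Registration is unchanged from the user's point of view; a minimal module along the lines of the Documentation/kprobes.txt example (probed symbol and message chosen arbitrarily):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;			/* continue with the probed instruction */
}

static struct kprobe demo_kp = {
	.pre_handler = demo_pre,
};

static int __init demo_init(void)
{
	demo_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
	if (!demo_kp.addr)
		return -EINVAL;
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");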
+ 7 - 7
arch/i386/kernel/machine_kexec.c

@@ -116,13 +116,13 @@ static void load_segments(void)
 	__asm__ __volatile__ (
 		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
 		"\t1:\n"
-		"\tmovl $"STR(__KERNEL_DS)",%eax\n"
-		"\tmovl %eax,%ds\n"
-		"\tmovl %eax,%es\n"
-		"\tmovl %eax,%fs\n"
-		"\tmovl %eax,%gs\n"
-		"\tmovl %eax,%ss\n"
-		);
+		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
+		"\tmovl %%eax,%%ds\n"
+		"\tmovl %%eax,%%es\n"
+		"\tmovl %%eax,%%fs\n"
+		"\tmovl %%eax,%%gs\n"
+		"\tmovl %%eax,%%ss\n"
+		::: "eax", "memory");
 #undef STR
 #undef __STR
 }

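The machine_kexec.c fix above is about GCC extended asm syntax: once an asm statement has operand/clobber sections, a single '%' introduces an operand reference, so literal register names must be written as '%%eax', and registers the code modifies belong in the clobber list. A small self-contained illustration:

/* Userspace sketch of the same escaping and clobber rules. */
static inline unsigned int read_fortytwo(void)
{
	unsigned int val;

	__asm__ __volatile__(
		"movl $42, %%eax\n\t"	/* "%%eax" = the literal register */
		"movl %%eax, %0\n\t"
		: "=r" (val)		/* output operand %0 */
		:			/* no inputs */
		: "eax");		/* tell GCC %eax was clobbered */
	return val;
}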
+ 9 - 6
arch/i386/kernel/microcode.c

@@ -74,6 +74,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -250,8 +251,8 @@ static int find_matching_ucodes (void)
 			error = -EINVAL;
 			goto out;
 		}
-		
-		for (cpu_num = 0; cpu_num < num_online_cpus(); cpu_num++) {
+
+		for_each_online_cpu(cpu_num) {
 			struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
 			if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
 				continue;
@@ -293,7 +294,7 @@ static int find_matching_ucodes (void)
 					error = -EFAULT;
 					goto out;
 				}
-				for (cpu_num = 0; cpu_num < num_online_cpus(); cpu_num++) {
+				for_each_online_cpu(cpu_num) {
 					struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
 					if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
 						continue;
@@ -304,7 +305,9 @@ static int find_matching_ucodes (void)
 			}
 		}
 		/* now check if any cpu has matched */
-		for (cpu_num = 0, allocated_flag = 0, sum = 0; cpu_num < num_online_cpus(); cpu_num++) {
+		allocated_flag = 0;
+		sum = 0;
+		for_each_online_cpu(cpu_num) {
 			if (ucode_cpu_info[cpu_num].err == MC_MARKED) {
 				struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
 				if (!allocated_flag) {
@@ -415,12 +418,12 @@ static int do_microcode_update (void)
 	}

 out_free:
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		if (ucode_cpu_info[i].mc) {
 			int j;
 			void *tmp = ucode_cpu_info[i].mc;
 			vfree(tmp);
-			for (j = i; j < num_online_cpus(); j++) {
+			for_each_online_cpu(j) {
 				if (ucode_cpu_info[j].mc == tmp)
 					ucode_cpu_info[j].mc = NULL;
 			}

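The microcode.c loops above used to assume that CPUs 0 .. num_online_cpus()-1 are exactly the online ones, which breaks once CPU numbering is sparse (for example with hotplug). for_each_online_cpu() walks the online map instead, visiting only — and all — online CPU ids:

/* The iteration idiom the patch switches to (kernel context assumed). */
#include <linux/cpumask.h>

static int demo_count_online(void)
{
	int cpu, n = 0;

	for_each_online_cpu(cpu)	/* ids need not be contiguous */
		n++;
	return n;
}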
+ 9 - 3
arch/i386/kernel/mpparse.c

@@ -710,7 +710,7 @@ void __init get_smp_config (void)
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
+		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
 			smp_found_config = 0;
 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
@@ -915,6 +915,7 @@ void __init mp_register_ioapic (
 	u32			gsi_base)
 {
 	int			idx = 0;
+	int			tmpid;

 	if (nr_ioapics >= MAX_IO_APICS) {
 		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
@@ -935,9 +936,14 @@ void __init mp_register_ioapic (

 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
-		mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+		tmpid = io_apic_get_unique_id(idx, id);
 	else
-		mp_ioapics[idx].mpc_apicid = id;
+		tmpid = id;
+	if (tmpid == -1) {
+		nr_ioapics--;
+		return;
+	}
+	mp_ioapics[idx].mpc_apicid = tmpid;
 	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
 	
 	/* 

+ 2 - 2
arch/i386/kernel/nmi.c

@@ -357,7 +357,7 @@ static void clear_msr_range(unsigned int base, unsigned int n)
 		wrmsr(base+i, 0, 0);
 }

-static inline void write_watchdog_counter(const char *descr)
+static void write_watchdog_counter(const char *descr)
 {
 	u64 count = (u64)cpu_khz * 1000;

@@ -544,7 +544,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
 			die_nmi(regs, "NMI Watchdog detected LOCKUP");
-
+	} else {
 		last_irq_sums[cpu] = sum;
 		alert_counter[cpu] = 0;
 	}

+ 4 - 0
arch/i386/kernel/setup.c

@@ -1599,6 +1599,10 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled)
 		efi_map_memmap();

+#ifdef CONFIG_X86_IO_APIC
+	check_acpi_pci();	/* Checks more than just ACPI actually */
+#endif
+
 #ifdef CONFIG_ACPI
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.

+ 0 - 10
arch/i386/kernel/smpboot.c

@@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
 cpumask_t cpu_possible_map;
-#endif
 EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;

@@ -902,12 +898,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;

-	if (!cpu_gdt_descr[cpu].address &&
-	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
-		printk("Failed to allocate GDT for CPU %d\n", cpu);
-		return 1;
-	}
-
 	++cpucount;

 	/*

+ 1 - 1
arch/i386/kernel/syscall_table.S

@@ -299,7 +299,7 @@ ENTRY(sys_call_table)
 	.long sys_mknodat
 	.long sys_fchownat
 	.long sys_futimesat
-	.long sys_newfstatat		/* 300 */
+	.long sys_fstatat64		/* 300 */
 	.long sys_unlinkat
 	.long sys_renameat
 	.long sys_linkat

+ 2 - 2
arch/i386/kernel/time.c

@@ -412,9 +412,9 @@ static int timer_resume(struct sys_device *dev)
 	write_seqlock_irqsave(&xtime_lock, flags);
 	xtime.tv_sec = sec;
 	xtime.tv_nsec = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-	jiffies += sleep_length;
+	jiffies_64 += sleep_length;
 	wall_jiffies += sleep_length;
+	write_sequnlock_irqrestore(&xtime_lock, flags);
 	if (last_timer->resume)
 		last_timer->resume();
 	cur_timer = last_timer;

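The time.c fix above moves the jiffies and wall_jiffies updates inside the xtime_lock write section (and bumps the 64-bit jiffies_64 rather than the truncated jiffies), so seqlock readers retry instead of observing a half-applied resume adjustment. The usual write/read pairing, with hypothetical variables:

#include <linux/seqlock.h>

static seqlock_t demo_lock = SEQLOCK_UNLOCKED;
static unsigned long demo_sec, demo_nsec;

static void demo_settime(unsigned long sec, unsigned long nsec)
{
	unsigned long flags;

	write_seqlock_irqsave(&demo_lock, flags);
	demo_sec  = sec;		/* both fields become visible ... */
	demo_nsec = nsec;		/* ... atomically w.r.t. readers  */
	write_sequnlock_irqrestore(&demo_lock, flags);
}

static unsigned long demo_gettime_sec(void)
{
	unsigned long seq, v;

	do {
		seq = read_seqbegin(&demo_lock);
		v = demo_sec;
	} while (read_seqretry(&demo_lock, seq));	/* retry if a writer ran */
	return v;
}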
+ 5 - 0
arch/i386/kernel/timers/timer_tsc.c

@@ -282,6 +282,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (val != CPUFREQ_RESUMECHANGE)
 		write_seqlock_irq(&xtime_lock);
 	if (!ref_freq) {
+		if (!freq->old){
+			ref_freq = freq->new;
+			goto end;
+		}
 		ref_freq = freq->old;
 		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
 #ifndef CONFIG_SMP
@@ -307,6 +311,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 #endif
 	}

+end:
 	if (val != CPUFREQ_RESUMECHANGE)
 		write_sequnlock_irq(&xtime_lock);


+ 3 - 3
arch/i386/mach-default/topology.c → arch/i386/kernel/topology.c

@@ -1,12 +1,12 @@
 /*
- * arch/i386/mach-generic/topology.c - Populate driverfs with topology information
+ * arch/i386/kernel/topology.c - Populate driverfs with topology information
  *
  * Written by: Matthew Dobson, IBM Corporation
  * Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.          
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,7 +34,7 @@ static struct i386_cpu cpu_devices[NR_CPUS];

 int arch_register_cpu(int num){
 	struct node *parent = NULL;
-	
+
 #ifdef CONFIG_NUMA
 	int node = cpu_to_node(num);
 	if (node_online(node))

+ 15 - 0
arch/i386/kernel/vsyscall-sysenter.S

@@ -7,6 +7,21 @@
  *    for details.
  */

+/*
+ * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
+ * %ecx itself for arg2. The pushing is because the sysexit instruction
+ * (found in entry.S) requires that we clobber %ecx with the desired %esp.
+ * User code might expect that %ecx is unclobbered though, as it would be
+ * for returning via the iret instruction, so we must push and pop.
+ *
+ * The caller puts arg3 in %edx, which the sysexit instruction requires
+ * for %eip. Thus, exactly as for arg2, we must push and pop.
+ *
+ * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
+ * instruction clobbers %esp, the user's %esp won't even survive entry
+ * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
+ * arg6 from the stack.
+ */
 	.text
 	.globl __kernel_vsyscall
 	.type __kernel_vsyscall,@function

+ 1 - 1
arch/i386/mach-default/Makefile

@@ -2,4 +2,4 @@
 # Makefile for the linux kernel.
 #

-obj-y				:= setup.o topology.o
+obj-y				:= setup.o

Some files were not shown because too many files changed in this diff