
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into merge

Chris Zankel, 16 years ago
parent
commit 65127d28e3
100 changed files with 4095 additions and 1254 deletions
  1. Documentation/DocBook/.gitignore (+4 -0)
  2. Documentation/cgroups/cgroups.txt (+22 -4)
  3. Documentation/cgroups/memcg_test.txt (+19 -1)
  4. Documentation/feature-removal-schedule.txt (+10 -0)
  5. Documentation/filesystems/proc.txt (+24 -1028)
  6. Documentation/gpio.txt (+9 -14)
  7. Documentation/networking/vxge.txt (+100 -0)
  8. Documentation/sysctl/00-INDEX (+2 -0)
  9. Documentation/sysctl/fs.txt (+72 -2)
  10. Documentation/sysctl/kernel.txt (+53 -0)
  11. Documentation/sysctl/net.txt (+175 -0)
  12. MAINTAINERS (+11 -2)
  13. arch/alpha/include/asm/spinlock.h (+3 -0)
  14. arch/alpha/kernel/process.c (+1 -1)
  15. arch/arm/include/asm/spinlock.h (+3 -0)
  16. arch/arm/kernel/process.c (+1 -1)
  17. arch/avr32/kernel/process.c (+1 -1)
  18. arch/blackfin/kernel/process.c (+1 -1)
  19. arch/cris/arch-v10/kernel/process.c (+1 -1)
  20. arch/cris/arch-v32/kernel/process.c (+1 -1)
  21. arch/cris/include/arch-v32/arch/spinlock.h (+2 -0)
  22. arch/cris/kernel/process.c (+0 -1)
  23. arch/frv/kernel/process.c (+1 -1)
  24. arch/h8300/kernel/process.c (+1 -1)
  25. arch/ia64/configs/generic_defconfig (+0 -3)
  26. arch/ia64/include/asm/spinlock.h (+63 -14)
  27. arch/ia64/include/asm/uv/uv_hub.h (+6 -0)
  28. arch/ia64/include/asm/uv/uv_mmrs.h (+155 -3)
  29. arch/ia64/kernel/process.c (+1 -1)
  30. arch/m32r/kernel/process.c (+1 -1)
  31. arch/m68k/kernel/process.c (+1 -1)
  32. arch/m68knommu/kernel/process.c (+1 -1)
  33. arch/mips/include/asm/spinlock.h (+2 -0)
  34. arch/mips/include/asm/unistd.h (+12 -6)
  35. arch/mips/kernel/process.c (+1 -1)
  36. arch/mips/kernel/scall32-o32.S (+2 -0)
  37. arch/mips/kernel/scall64-64.S (+2 -0)
  38. arch/mips/kernel/scall64-n32.S (+2 -0)
  39. arch/mips/kernel/scall64-o32.S (+2 -0)
  40. arch/mn10300/kernel/process.c (+1 -1)
  41. arch/parisc/include/asm/spinlock.h (+3 -0)
  42. arch/parisc/kernel/process.c (+1 -1)
  43. arch/powerpc/Kconfig.debug (+0 -10)
  44. arch/powerpc/include/asm/spinlock.h (+3 -0)
  45. arch/powerpc/kernel/process.c (+1 -1)
  46. arch/powerpc/kernel/vio.c (+1 -1)
  47. arch/powerpc/platforms/cell/spufs/inode.c (+1 -1)
  48. arch/s390/Kconfig.debug (+0 -9)
  49. arch/s390/include/asm/spinlock.h (+3 -0)
  50. arch/s390/kernel/process.c (+1 -1)
  51. arch/sh/include/asm/spinlock.h (+3 -0)
  52. arch/sh/kernel/process_32.c (+1 -1)
  53. arch/sh/kernel/process_64.c (+1 -1)
  54. arch/sparc/Kconfig.debug (+0 -9)
  55. arch/sparc/include/asm/spinlock_32.h (+2 -0)
  56. arch/sparc/include/asm/spinlock_64.h (+2 -0)
  57. arch/sparc/kernel/process_32.c (+1 -1)
  58. arch/sparc/kernel/process_64.c (+1 -1)
  59. arch/um/drivers/net_kern.c (+1 -1)
  60. arch/um/kernel/process.c (+1 -1)
  61. arch/um/kernel/syscall.c (+2 -1)
  62. arch/um/sys-i386/sys_call_table.S (+11 -0)
  63. arch/x86/Kconfig.debug (+0 -9)
  64. arch/x86/ia32/ia32entry.S (+2 -0)
  65. arch/x86/include/asm/spinlock.h (+3 -0)
  66. arch/x86/include/asm/unistd_32.h (+2 -0)
  67. arch/x86/include/asm/unistd_64.h (+4 -0)
  68. arch/x86/include/asm/uv/uv_hub.h (+14 -0)
  69. arch/x86/include/asm/uv/uv_mmrs.h (+153 -0)
  70. arch/x86/kernel/apic/x2apic_uv_x.c (+2 -7)
  71. arch/x86/kernel/process_32.c (+1 -1)
  72. arch/x86/kernel/process_64.c (+1 -1)
  73. arch/x86/kernel/ptrace.c (+1 -1)
  74. arch/x86/kernel/syscall_table_32.S (+2 -0)
  75. arch/xtensa/kernel/process.c (+1 -1)
  76. drivers/block/floppy.c (+8 -0)
  77. drivers/block/nbd.c (+71 -41)
  78. drivers/char/hpet.c (+21 -1)
  79. drivers/char/random.c (+2 -1)
  80. drivers/char/synclink_gt.c (+38 -20)
  81. drivers/char/tty_audit.c (+0 -2)
  82. drivers/char/tty_io.c (+3 -3)
  83. drivers/char/tty_ldisc.c (+0 -1)
  84. drivers/crypto/hifn_795x.c (+1 -1)
  85. drivers/edac/Kconfig (+33 -3)
  86. drivers/edac/Makefile (+1 -1)
  87. drivers/edac/amd8111_edac.c (+595 -0)
  88. drivers/edac/amd8111_edac.h (+130 -0)
  89. drivers/edac/amd8131_edac.c (+379 -0)
  90. drivers/edac/amd8131_edac.h (+119 -0)
  91. drivers/edac/edac_core.h (+15 -1)
  92. drivers/edac/edac_pci.c (+14 -0)
  93. drivers/edac/ppc4xx_edac.c (+1448 -0)
  94. drivers/edac/ppc4xx_edac.h (+172 -0)
  95. drivers/gpio/gpiolib.c (+14 -5)
  96. drivers/gpu/drm/drm_crtc_helper.c (+29 -2)
  97. drivers/gpu/drm/drm_edid.c (+3 -5)
  98. drivers/gpu/drm/drm_gem.c (+1 -6)
  99. drivers/gpu/drm/drm_sysfs.c (+1 -0)
  100. drivers/gpu/drm/i915/i915_dma.c (+2 -9)

+ 4 - 0
Documentation/DocBook/.gitignore

@@ -4,3 +4,7 @@
 *.html
 *.9.gz
 *.9
+*.aux
+*.dvi
+*.log
+*.out

+ 22 - 4
Documentation/cgroups/cgroups.txt

@@ -333,12 +333,23 @@ The "xxx" is not interpreted by the cgroup code, but will appear in
 
 To mount a cgroup hierarchy with just the cpuset and numtasks
 subsystems, type:
-# mount -t cgroup -o cpuset,numtasks hier1 /dev/cgroup
+# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
+# mount -o remount,cpuset,ns hier1 /dev/cgroup
 
-# mount -o remount,cpuset,ns  /dev/cgroup
+Now memory is removed from the hierarchy and ns is added.
+
+Note this will add ns to the hierarchy but won't remove memory or
+cpuset, because the new options are appended to the old ones:
+# mount -o remount,ns /dev/cgroup
+
+To Specify a hierarchy's release_agent:
+# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
+  xxx /dev/cgroup
+
+Note that specifying 'release_agent' more than once will return failure.
 
 Note that changing the set of subsystems is currently only supported
 when the hierarchy consists of a single (root) cgroup. Supporting
@@ -349,6 +360,11 @@ Then under /dev/cgroup you can find a tree that corresponds to the
 tree of the cgroups in the system. For instance, /dev/cgroup
 is the cgroup that holds the whole system.
 
+If you want to change the value of release_agent:
+# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
+
+It can also be changed via remount.
+
 If you want to create a new cgroup under /dev/cgroup:
 # cd /dev/cgroup
 # mkdir my_cgroup
@@ -476,11 +492,13 @@ cgroup->parent is still valid. (Note - can also be called for a
 newly-created cgroup if an error occurs after this subsystem's
 create() method has been called for the new cgroup).
 
-void pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+int pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
 
 Called before checking the reference count on each subsystem. This may
 be useful for subsystems which have some extra references even if
-there are not tasks in the cgroup.
+there are not tasks in the cgroup. If pre_destroy() returns error code,
+rmdir() will fail with it. From this behavior, pre_destroy() can be
+called multiple times against a cgroup.
 
 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	       struct task_struct *task)
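
For illustration, a minimal sketch of a pre_destroy() handler written against the new int-returning prototype shown in the hunk above; the foo_* structure, the foo_from_cgroup() helper and the extra_refs field are hypothetical:

    /* Hypothetical subsystem: refuse rmdir while extra references remain. */
    static int foo_pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
    {
        struct foo_cgroup *foo = foo_from_cgroup(cgrp);    /* made-up helper */

        if (atomic_read(&foo->extra_refs) > 0)
            return -EBUSY;    /* propagated: rmdir() fails with -EBUSY */
        return 0;             /* safe to tear the cgroup down */
    }

Because a failed rmdir() can simply be retried, such a handler has to tolerate being called more than once for the same cgroup, as the text above notes.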

+ 19 - 1
Documentation/cgroups/memcg_test.txt

@@ -1,5 +1,5 @@
 Memory Resource Controller(Memcg)  Implementation Memo.
-Last Updated: 2009/1/19
+Last Updated: 2009/1/20
 Base Kernel Version: based on 2.6.29-rc2.
 
 Because VM is getting complex (one of reasons is memcg...), memcg's behavior
@@ -360,3 +360,21 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 	# kill malloc task.
 
 	Of course, tmpfs v.s. swapoff test should be tested, too.
+
+ 9.8 OOM-Killer
+	Out-of-memory caused by memcg's limit will kill tasks under
+	the memcg. When hierarchy is used, a task under hierarchy
+	will be killed by the kernel.
+	In this case, panic_on_oom shouldn't be invoked and tasks
+	in other groups shouldn't be killed.
+
+	It's not difficult to cause OOM under memcg as following.
+	Case A) when you can swapoff
+	#swapoff -a
+	#echo 50M > /memory.limit_in_bytes
+	run 51M of malloc
+
+	Case B) when you use mem+swap limitation.
+	#echo 50M > memory.limit_in_bytes
+	#echo 50M > memory.memsw.limit_in_bytes
+	run 51M of malloc
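
A minimal userspace sketch of the "run 51M of malloc" step used in both cases; the size is hard-coded just above the 50M limit so the charge is guaranteed to exceed it, and the task is assumed to already be in the memcg under test:

    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        size_t size = 51UL * 1024 * 1024;    /* 51M, just above the 50M limit */
        char *buf = malloc(size);

        if (!buf)
            return 1;
        memset(buf, 0xaa, size);    /* touch every page so memcg charges it */
        pause();                    /* stay resident until the OOM killer acts */
        return 0;
    }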

+ 10 - 0
Documentation/feature-removal-schedule.txt

@@ -255,6 +255,16 @@ Who:	Jan Engelhardt <jengelh@computergmbh.de>
 
 ---------------------------
 
+What:	GPIO autorequest on gpio_direction_{input,output}() in gpiolib
+When:	February 2010
+Why:	All callers should use explicit gpio_request()/gpio_free().
+	The autorequest mechanism in gpiolib was provided mostly as a
+	migration aid for legacy GPIO interfaces (for SOC based GPIOs).
+	Those users have now largely migrated.  Platforms implementing
+	the GPIO interfaces without using gpiolib will see no changes.
+Who:	David Brownell <dbrownell@users.sourceforge.net>
+---------------------------
+
 What:	b43 support for firmware revision < 410
 When:	The schedule was July 2008, but it was decided that we are going to keep the
         code as long as there are no major maintanance headaches.

File diff suppressed because it is too large
+ 24 - 1028
Documentation/filesystems/proc.txt


+ 9 - 14
Documentation/gpio.txt

@@ -123,7 +123,10 @@ platform-specific implementation issue.
 
 Using GPIOs
 -----------
-One of the first things to do with a GPIO, often in board setup code when
+The first thing a system should do with a GPIO is allocate it, using
+the gpio_request() call; see later.
+
+One of the next things to do with a GPIO, often in board setup code when
 setting up a platform_device using the GPIO, is mark its direction:
 
 	/* set as input or output, returning 0 or negative errno */
@@ -141,8 +144,8 @@ This helps avoid signal glitching during system startup.
 
 For compatibility with legacy interfaces to GPIOs, setting the direction
 of a GPIO implicitly requests that GPIO (see below) if it has not been
-requested already.  That compatibility may be removed in the future;
-explicitly requesting GPIOs is strongly preferred.
+requested already.  That compatibility is being removed from the optional
+gpiolib framework.
 
 Setting the direction can fail if the GPIO number is invalid, or when
 that particular GPIO can't be used in that mode.  It's generally a bad
@@ -195,7 +198,7 @@ This requires sleeping, which can't be done from inside IRQ handlers.
 
 Platforms that support this type of GPIO distinguish them from other GPIOs
 by returning nonzero from this call (which requires a valid GPIO number,
-either explicitly or implicitly requested):
+which should have been previously allocated with gpio_request):
 
 	int gpio_cansleep(unsigned gpio);
 
@@ -212,10 +215,9 @@ for GPIOs that can't be accessed from IRQ handlers, these calls act the
 same as the spinlock-safe calls.
 
 
-Claiming and Releasing GPIOs (OPTIONAL)
----------------------------------------
+Claiming and Releasing GPIOs
+----------------------------
 To help catch system configuration errors, two calls are defined.
-However, many platforms don't currently support this mechanism.
 
 	/* request GPIO, returning 0 or negative errno.
 	 * non-null labels may be useful for diagnostics.
@@ -244,13 +246,6 @@ Some platforms may also use knowledge about what GPIOs are active for
 power management, such as by powering down unused chip sectors and, more
 easily, gating off unused clocks.
 
-These two calls are optional because not not all current Linux platforms
-offer such functionality in their GPIO support; a valid implementation
-could return success for all gpio_request() calls.  Unlike the other calls,
-the state they represent doesn't normally match anything from a hardware
-register; it's just a software bitmap which clearly is not necessary for
-correct operation of hardware or (bug free) drivers.
-
 Note that requesting a GPIO does NOT cause it to be configured in any
 way; it just marks that GPIO as in use.  Separate code must handle any
 pin setup (e.g. controlling which pin the GPIO uses, pullup/pulldown).
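
A minimal sketch of the request-then-configure order the text above now mandates, as it might appear in board setup code; LED_GPIO and the label are made-up values, and the declarations come from the platform's GPIO header:

    /* board setup: claim the GPIO first, then set its direction */
    static int __init board_setup_led(void)
    {
        int err;

        err = gpio_request(LED_GPIO, "front-panel-led");
        if (err)
            return err;                    /* line already claimed or invalid */

        err = gpio_direction_output(LED_GPIO, 0);    /* start driven low */
        if (err)
            gpio_free(LED_GPIO);           /* release on failure */
        return err;
    }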

+ 100 - 0
Documentation/networking/vxge.txt

@@ -0,0 +1,100 @@
+Neterion's (Formerly S2io) X3100 Series 10GbE PCIe Server Adapter Linux driver
+==============================================================================
+
+Contents
+--------
+
+1) Introduction
+2) Features supported
+3) Configurable driver parameters
+4) Troubleshooting
+
+1) Introduction:
+----------------
+This Linux driver supports all Neterion's X3100 series 10 GbE PCIe I/O
+Virtualized Server adapters.
+The X3100 series supports four modes of operation, configurable via
+firmware -
+	Single function mode
+	Multi function mode
+	SRIOV mode
+	MRIOV mode
+The functions share a 10GbE link and the pci-e bus, but hardly anything else
+inside the ASIC. Features like independent hw reset, statistics, bandwidth/
+priority allocation and guarantees, GRO, TSO, interrupt moderation etc are
+supported independently on each function.
+
+(See below for a complete list of features supported for both IPv4 and IPv6)
+
+2) Features supported:
+----------------------
+
+i)   Single function mode (up to 17 queues)
+
+ii)  Multi function mode (up to 17 functions)
+
+iii) PCI-SIG's I/O Virtualization
+       - Single Root mode: v1.0 (up to 17 functions)
+       - Multi-Root mode: v1.0 (up to 17 functions)
+
+iv)  Jumbo frames
+       X3100 Series supports MTU up to 9600 bytes, modifiable using
+       ifconfig command.
+
+v)   Offloads supported: (Enabled by default)
+       Checksum offload (TCP/UDP/IP) on transmit and receive paths
+       TCP Segmentation Offload (TSO) on transmit path
+       Generic Receive Offload (GRO) on receive path
+
+vi)  MSI-X: (Enabled by default)
+       Resulting in noticeable performance improvement (up to 7% on certain
+       platforms).
+
+vii) NAPI: (Enabled by default)
+       For better Rx interrupt moderation.
+
+viii)RTH (Receive Traffic Hash): (Enabled by default)
+       Receive side steering for better scaling.
+
+ix)  Statistics
+       Comprehensive MAC-level and software statistics displayed using
+       "ethtool -S" option.
+
+x)   Multiple hardware queues: (Enabled by default)
+       Up to 17 hardware based transmit and receive data channels, with
+       multiple steering options (transmit multiqueue enabled by default).
+
+3) Configurable driver parameters:
+----------------------------------
+
+i)  max_config_dev
+       Specifies maximum device functions to be enabled.
+       Valid range: 1-8
+
+ii) max_config_port
+       Specifies number of ports to be enabled.
+       Valid range: 1,2
+       Default: 1
+
+iii)max_config_vpath
+       Specifies maximum VPATH(s) configured for each device function.
+       Valid range: 1-17
+
+iv) vlan_tag_strip
+       Enables/disables vlan tag stripping from all received tagged frames that
+       are not replicated at the internal L2 switch.
+       Valid range: 0,1 (disabled, enabled respectively)
+       Default: 1
+
+v)  addr_learn_en
+       Enable learning the mac address of the guest OS interface in
+       virtualization environment.
+       Valid range: 0,1 (disabled, enabled respectively)
+       Default: 0
+
+4) Troubleshooting:
+-------------------
+
+To resolve an issue with the source code or X3100 series adapter, please collect
+the statistics, register dumps using ethool, relevant logs and email them to
+support@neterion.com.

+ 2 - 0
Documentation/sysctl/00-INDEX

@@ -10,6 +10,8 @@ fs.txt
 	- documentation for /proc/sys/fs/*.
 kernel.txt
 	- documentation for /proc/sys/kernel/*.
+net.txt
+	- documentation for /proc/sys/net/*.
 sunrpc.txt
 	- documentation for /proc/sys/sunrpc/*.
 vm.txt

+ 72 - 2
Documentation/sysctl/fs.txt

@@ -1,5 +1,6 @@
 Documentation for /proc/sys/fs/*	kernel version 2.2.10
 	(c) 1998, 1999,  Rik van Riel <riel@nl.linux.org>
+	(c) 2009,        Shen Feng<shen@cn.fujitsu.com>
 
 For general info and legal blurb, please look in README.
 
@@ -14,7 +15,12 @@ kernel. Since some of the files _can_ be used to screw up your
 system, it is advisable to read both documentation and source
 before actually making adjustments.
 
+1. /proc/sys/fs
+----------------------------------------------------------
+
 Currently, these files are in /proc/sys/fs:
+- aio-max-nr
+- aio-nr
 - dentry-state
 - dquot-max
 - dquot-nr
@@ -30,8 +36,15 @@ Currently, these files are in /proc/sys/fs:
 - super-max
 - super-nr
 
-Documentation for the files in /proc/sys/fs/binfmt_misc is
-in Documentation/binfmt_misc.txt.
+==============================================================
+
+aio-nr & aio-max-nr:
+
+aio-nr is the running total of the number of events specified on the
+io_setup system call for all currently active aio contexts.  If aio-nr
+reaches aio-max-nr then io_setup will fail with EAGAIN.  Note that
+raising aio-max-nr does not result in the pre-allocation or re-sizing
+of any kernel data structures.
 
 ==============================================================
 
@@ -178,3 +191,60 @@ requests.  aio-max-nr allows you to change the maximum value
 aio-nr can grow to.
 
 ==============================================================
+
+
+2. /proc/sys/fs/binfmt_misc
+----------------------------------------------------------
+
+Documentation for the files in /proc/sys/fs/binfmt_misc is
+in Documentation/binfmt_misc.txt.
+
+
+3. /proc/sys/fs/mqueue - POSIX message queues filesystem
+----------------------------------------------------------
+
+The "mqueue"  filesystem provides  the necessary kernel features to enable the
+creation of a  user space  library that  implements  the  POSIX message queues
+API (as noted by the  MSG tag in the  POSIX 1003.1-2001 version  of the System
+Interfaces specification.)
+
+The "mqueue" filesystem contains values for determining/setting  the amount of
+resources used by the file system.
+
+/proc/sys/fs/mqueue/queues_max is a read/write  file for  setting/getting  the
+maximum number of message queues allowed on the system.
+
+/proc/sys/fs/mqueue/msg_max  is  a  read/write file  for  setting/getting  the
+maximum number of messages in a queue value.  In fact it is the limiting value
+for another (user) limit which is set in mq_open invocation. This attribute of
+a queue must be less or equal then msg_max.
+
+/proc/sys/fs/mqueue/msgsize_max is  a read/write  file for setting/getting the
+maximum  message size value (it is every  message queue's attribute set during
+its creation).
+
+
+4. /proc/sys/fs/epoll - Configuration options for the epoll interface
+--------------------------------------------------------
+
+This directory contains configuration options for the epoll(7) interface.
+
+max_user_instances
+------------------
+
+This is the maximum number of epoll file descriptors that a single user can
+have open at a given time. The default value is 128, and should be enough
+for normal users.
+
+max_user_watches
+----------------
+
+Every epoll file descriptor can store a number of files to be monitored
+for event readiness. Each one of these monitored files constitutes a "watch".
+This configuration option sets the maximum number of "watches" that are
+allowed for each user.
+Each "watch" costs roughly 90 bytes on a 32bit kernel, and roughly 160 bytes
+on a 64bit one.
+The current default value for  max_user_watches  is the 1/32 of the available
+low memory, divided for the "watch" cost in bytes.
+
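
A small userspace sketch of the aio-nr/aio-max-nr interaction described above: io_setup() reserves events against aio-nr and is refused with EAGAIN once aio-max-nr would be exceeded. The raw syscall is used here; a libaio wrapper would behave the same way:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    int main(void)
    {
        aio_context_t ctx = 0;

        /* ask for a large context; fails with EAGAIN when it would
         * push aio-nr past /proc/sys/fs/aio-max-nr */
        if (syscall(__NR_io_setup, 65536, &ctx) < 0 && errno == EAGAIN)
            fprintf(stderr, "io_setup: raise /proc/sys/fs/aio-max-nr\n");
        return 0;
    }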

+ 53 - 0
Documentation/sysctl/kernel.txt

@@ -1,5 +1,6 @@
 Documentation for /proc/sys/kernel/*	kernel version 2.2.10
 	(c) 1998, 1999,  Rik van Riel <riel@nl.linux.org>
+	(c) 2009,        Shen Feng<shen@cn.fujitsu.com>
 
 For general info and legal blurb, please look in README.
 
@@ -18,6 +19,7 @@ Currently, these files might (depending on your configuration)
 show up in /proc/sys/kernel:
 - acpi_video_flags
 - acct
+- auto_msgmni
 - core_pattern
 - core_uses_pid
 - ctrl-alt-del
@@ -33,6 +35,7 @@ show up in /proc/sys/kernel:
 - msgmax
 - msgmnb
 - msgmni
+- nmi_watchdog
 - osrelease
 - ostype
 - overflowgid
@@ -40,6 +43,7 @@ show up in /proc/sys/kernel:
 - panic
 - pid_max
 - powersave-nap               [ PPC only ]
+- panic_on_unrecovered_nmi
 - printk
 - randomize_va_space
 - real-root-dev               ==> Documentation/initrd.txt
@@ -55,6 +59,7 @@ show up in /proc/sys/kernel:
 - sysrq                       ==> Documentation/sysrq.txt
 - tainted
 - threads-max
+- unknown_nmi_panic
 - version
 
 ==============================================================
@@ -381,3 +386,51 @@ can be ORed together:
  512 - A kernel warning has occurred.
 1024 - A module from drivers/staging was loaded.
 
+==============================================================
+
+auto_msgmni:
+
+Enables/Disables automatic recomputing of msgmni upon memory add/remove or
+upon ipc namespace creation/removal (see the msgmni description above).
+Echoing "1" into this file enables msgmni automatic recomputing.
+Echoing "0" turns it off.
+auto_msgmni default value is 1.
+
+==============================================================
+
+nmi_watchdog:
+
+Enables/Disables the NMI watchdog on x86 systems.  When the value is non-zero
+the NMI watchdog is enabled and will continuously test all online cpus to
+determine whether or not they are still functioning properly. Currently,
+passing "nmi_watchdog=" parameter at boot time is required for this function
+to work.
+
+If LAPIC NMI watchdog method is in use (nmi_watchdog=2 kernel parameter), the
+NMI watchdog shares registers with oprofile. By disabling the NMI watchdog,
+oprofile may have more registers to utilize.
+
+==============================================================
+
+unknown_nmi_panic:
+
+The value in this file affects behavior of handling NMI. When the value is
+non-zero, unknown NMI is trapped and then panic occurs. At that time, kernel
+debugging information is displayed on console.
+
+NMI switch that most IA32 servers have fires unknown NMI up, for example.
+If a system hangs up, try pressing the NMI switch.
+
+==============================================================
+
+panic_on_unrecovered_nmi:
+
+The default Linux behaviour on an NMI of either memory or unknown is to continue
+operation. For many environments such as scientific computing it is preferable
+that the box is taken out and the error dealt with than an uncorrected
+parity/ECC error get propogated.
+
+A small number of systems do generate NMI's for bizarre random reasons such as
+power management so the default is off. That sysctl works like the existing
+panic controls already in that directory.
+
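
These controls are normally toggled from a shell with echo; purely for illustration, the same write done from C, using unknown_nmi_panic as the example path:

    #include <stdio.h>

    int main(void)
    {
        /* equivalent to: echo 1 > /proc/sys/kernel/unknown_nmi_panic */
        FILE *f = fopen("/proc/sys/kernel/unknown_nmi_panic", "w");

        if (!f) {
            perror("unknown_nmi_panic");
            return 1;
        }
        fputs("1\n", f);
        fclose(f);
        return 0;
    }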

+ 175 - 0
Documentation/sysctl/net.txt

@@ -0,0 +1,175 @@
+Documentation for /proc/sys/net/*	kernel version 2.4.0-test11-pre4
+	(c) 1999		Terrehon Bowden <terrehon@pacbell.net>
+				Bodo Bauer <bb@ricochet.net>
+	(c) 2000		Jorge Nerin <comandante@zaralinux.com>
+	(c) 2009		Shen Feng <shen@cn.fujitsu.com>
+
+For general info and legal blurb, please look in README.
+
+==============================================================
+
+This file contains the documentation for the sysctl files in
+/proc/sys/net and is valid for Linux kernel version 2.4.0-test11-pre4.
+
+The interface  to  the  networking  parts  of  the  kernel  is  located  in
+/proc/sys/net. The following table shows all possible subdirectories.You may
+see only some of them, depending on your kernel's configuration.
+
+
+Table : Subdirectories in /proc/sys/net
+..............................................................................
+ Directory Content             Directory  Content
+ core      General parameter   appletalk  Appletalk protocol
+ unix      Unix domain sockets netrom     NET/ROM
+ 802       E802 protocol       ax25       AX25
+ ethernet  Ethernet protocol   rose       X.25 PLP layer
+ ipv4      IP version 4        x25        X.25 protocol
+ ipx       IPX                 token-ring IBM token ring
+ bridge    Bridging            decnet     DEC net
+ ipv6      IP version 6
+..............................................................................
+
+1. /proc/sys/net/core - Network core options
+-------------------------------------------------------
+
+rmem_default
+------------
+
+The default setting of the socket receive buffer in bytes.
+
+rmem_max
+--------
+
+The maximum receive socket buffer size in bytes.
+
+wmem_default
+------------
+
+The default setting (in bytes) of the socket send buffer.
+
+wmem_max
+--------
+
+The maximum send socket buffer size in bytes.
+
+message_burst and message_cost
+------------------------------
+
+These parameters  are used to limit the warning messages written to the kernel
+log from  the  networking  code.  They  enforce  a  rate  limit  to  make  a
+denial-of-service attack  impossible. A higher message_cost factor, results in
+fewer messages that will be written. Message_burst controls when messages will
+be dropped.  The  default  settings  limit  warning messages to one every five
+seconds.
+
+warnings
+--------
+
+This controls console messages from the networking stack that can occur because
+of problems on the network like duplicate address or bad checksums. Normally,
+this should be enabled, but if the problem persists the messages can be
+disabled.
+
+netdev_budget
+-------------
+
+Maximum number of packets taken from all interfaces in one polling cycle (NAPI
+poll). In one polling cycle interfaces which are registered to polling are
+probed in a round-robin manner. The limit of packets in one such probe can be
+set per-device via sysfs class/net/<device>/weight .
+
+netdev_max_backlog
+------------------
+
+Maximum number  of  packets,  queued  on  the  INPUT  side, when the interface
+receives packets faster than kernel can process them.
+
+optmem_max
+----------
+
+Maximum ancillary buffer size allowed per socket. Ancillary data is a sequence
+of struct cmsghdr structures with appended data.
+
+2. /proc/sys/net/unix - Parameters for Unix domain sockets
+-------------------------------------------------------
+
+There is only one file in this directory.
+unix_dgram_qlen limits the max number of datagrams queued in Unix domain
+socket's buffer. It will not take effect unless PF_UNIX flag is spicified.
+
+
+3. /proc/sys/net/ipv4 - IPV4 settings
+-------------------------------------------------------
+Please see: Documentation/networking/ip-sysctl.txt and ipvs-sysctl.txt for
+descriptions of these entries.
+
+
+4. Appletalk
+-------------------------------------------------------
+
+The /proc/sys/net/appletalk  directory  holds the Appletalk configuration data
+when Appletalk is loaded. The configurable parameters are:
+
+aarp-expiry-time
+----------------
+
+The amount  of  time  we keep an ARP entry before expiring it. Used to age out
+old hosts.
+
+aarp-resolve-time
+-----------------
+
+The amount of time we will spend trying to resolve an Appletalk address.
+
+aarp-retransmit-limit
+---------------------
+
+The number of times we will retransmit a query before giving up.
+
+aarp-tick-time
+--------------
+
+Controls the rate at which expires are checked.
+
+The directory  /proc/net/appletalk  holds the list of active Appletalk sockets
+on a machine.
+
+The fields  indicate  the DDP type, the local address (in network:node format)
+the remote  address,  the  size of the transmit pending queue, the size of the
+received queue  (bytes waiting for applications to read) the state and the uid
+owning the socket.
+
+/proc/net/atalk_iface lists  all  the  interfaces  configured for appletalk.It
+shows the  name  of the interface, its Appletalk address, the network range on
+that address  (or  network number for phase 1 networks), and the status of the
+interface.
+
+/proc/net/atalk_route lists  each  known  network  route.  It lists the target
+(network) that the route leads to, the router (may be directly connected), the
+route flags, and the device the route is using.
+
+
+5. IPX
+-------------------------------------------------------
+
+The IPX protocol has no tunable values in proc/sys/net.
+
+The IPX  protocol  does,  however,  provide  proc/net/ipx. This lists each IPX
+socket giving  the  local  and  remote  addresses  in  Novell  format (that is
+network:node:port). In  accordance  with  the  strange  Novell  tradition,
+everything but the port is in hex. Not_Connected is displayed for sockets that
+are not  tied to a specific remote address. The Tx and Rx queue sizes indicate
+the number  of  bytes  pending  for  transmission  and  reception.  The  state
+indicates the  state  the  socket  is  in and the uid is the owning uid of the
+socket.
+
+The /proc/net/ipx_interface  file lists all IPX interfaces. For each interface
+it gives  the network number, the node number, and indicates if the network is
+the primary  network.  It  also  indicates  which  device  it  is bound to (or
+Internal for  internal  networks)  and  the  Frame  Type if appropriate. Linux
+supports 802.3,  802.2,  802.2  SNAP  and DIX (Blue Book) ethernet framing for
+IPX.
+
+The /proc/net/ipx_route  table  holds  a list of IPX routes. For each route it
+gives the  destination  network, the router node (or Directly) and the network
+address of the router (or Connected) for internal networks.
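
A small sketch showing how rmem_max bounds what an unprivileged socket can ask for: the SO_RCVBUF request below is silently clamped to net/core/rmem_max (the kernel also doubles the stored value to account for bookkeeping overhead):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int req = 4 * 1024 * 1024, got;
        socklen_t len = sizeof(got);

        if (fd < 0)
            return 1;
        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
        printf("requested %d bytes, effective %d bytes\n", req, got);
        return 0;
    }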

+ 11 - 2
MAINTAINERS

@@ -1945,6 +1945,12 @@ L:	lm-sensors@lm-sensors.org
 W:	http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
 S:	Maintained
 
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L:	linuxppc-dev@ozlabs.org
+L:	linux-kernel@vger.kernel.org
+S:	Odd Fixes
+F:	drivers/char/hvc_*
+
 GSPCA FINEPIX SUBDRIVER
 P:	Frank Zago
 M:	frank@zago.net
@@ -3098,7 +3104,7 @@ M:	shemminger@linux-foundation.org
 L:	netem@lists.linux-foundation.org
 S:	Maintained
 
-NETERION (S2IO) Xframe 10GbE DRIVER
+NETERION (S2IO) 10GbE DRIVER (xframe/vxge)
 P:	Ramkrishna Vepa
 M:	ram.vepa@neterion.com
 P:	Rastapur Santosh
@@ -3107,8 +3113,11 @@ P:	Sivakumar Subramani
 M:	sivakumar.subramani@neterion.com
 P:	Sreenivasa Honnur
 M:	sreenivasa.honnur@neterion.com
+P:	Anil Murthy
+M:	anil.murthy@neterion.com
 L:	netdev@vger.kernel.org
-W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
+W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
+W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
 S:	Supported
 
 NETFILTER/IPTABLES/IPCHAINS

+ 3 - 0
arch/alpha/include/asm/spinlock.h

@@ -166,6 +166,9 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
 	lock->lock = 0;
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
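
The new __raw_read_lock_flags()/__raw_write_lock_flags() hooks let an architecture look at the caller's saved interrupt flags and, if it chooses, re-enable interrupts while waiting for a contended lock. On architectures that do not implement that (alpha here, and most of the others below), the hooks simply fall back to the plain lock. Roughly, the caller-side pattern they serve looks like this simplified sketch (my_lock is hypothetical; real code reaches these macros through read_lock_irqsave() and friends rather than calling them directly):

    unsigned long flags;

    local_irq_save(flags);                     /* disable IRQs, remember old state */
    __raw_read_lock_flags(&my_lock, flags);    /* arch may briefly re-enable IRQs
                                                * while spinning, based on flags */
    /* ... critical section ... */
    __raw_read_unlock(&my_lock);
    local_irq_restore(flags);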

+ 1 - 1
arch/alpha/kernel/process.c

@@ -272,7 +272,7 @@ alpha_vfork(struct pt_regs *regs)
  */
 
 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+copy_thread(unsigned long clone_flags, unsigned long usp,
 	    unsigned long unused,
 	    struct task_struct * p, struct pt_regs * regs)
 {
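
This is the first of many identical hunks in this merge: the long-unused first argument of copy_thread() is dropped on every architecture, leaving one common prototype, reproduced here from the hunk above for reference:

    int copy_thread(unsigned long clone_flags, unsigned long usp,
                    unsigned long unused,
                    struct task_struct *p, struct pt_regs *regs);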

+ 3 - 0
arch/arm/include/asm/spinlock.h

@@ -217,6 +217,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 /* read_can_lock - would read_trylock() succeed? */
 #define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

+ 1 - 1
arch/arm/kernel/process.c

@@ -301,7 +301,7 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
 {
 	struct thread_info *thread = task_thread_info(p);

+ 1 - 1
arch/avr32/kernel/process.c

@@ -332,7 +332,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
 asmlinkage void ret_from_fork(void);
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/blackfin/kernel/process.c

@@ -193,7 +193,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 }
 
 int
-copy_thread(int nr, unsigned long clone_flags,
+copy_thread(unsigned long clone_flags,
 	    unsigned long usp, unsigned long topstk,
 	    struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/cris/arch-v10/kernel/process.c

@@ -115,7 +115,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  */
 asmlinkage void ret_from_fork(void);
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/cris/arch-v32/kernel/process.c

@@ -131,7 +131,7 @@ kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 extern asmlinkage void ret_from_fork(void);
 
 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+copy_thread(unsigned long clone_flags, unsigned long usp,
 	unsigned long unused,
 	struct task_struct *p, struct pt_regs *regs)
 {

+ 2 - 0
arch/cris/include/arch-v32/arch/spinlock.h

@@ -121,6 +121,8 @@ static  inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return 1;
 }
 
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()

+ 0 - 1
arch/cris/kernel/process.c

@@ -19,7 +19,6 @@
 #include <asm/system.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <linux/sched.h>
 #include <linux/fs.h>

+ 1 - 1
arch/frv/kernel/process.c

@@ -204,7 +204,7 @@ void prepare_to_copy(struct task_struct *tsk)
 /*
  * set up the kernel stack and exception frames for a new process
  */
-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
 		unsigned long usp, unsigned long topstk,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/h8300/kernel/process.c

@@ -191,7 +191,7 @@ asmlinkage int h8300_clone(struct pt_regs *regs)
 
 }
 
-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
                 unsigned long usp, unsigned long topstk,
 		 struct task_struct * p, struct pt_regs * regs)
 {

+ 0 - 3
arch/ia64/configs/generic_defconfig

@@ -193,7 +193,6 @@ CONFIG_BOUNCE=y
 CONFIG_NR_QUICK=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_UNEVICTABLE_LRU=y
-CONFIG_MMU_NOTIFIER=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -416,8 +415,6 @@ CONFIG_SGI_IOC4=y
 # CONFIG_ENCLOSURE_SERVICES is not set
 CONFIG_SGI_XP=m
 # CONFIG_HP_ILO is not set
-CONFIG_SGI_GRU=m
-# CONFIG_SGI_GRU_DEBUG is not set
 # CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 CONFIG_IDE=y

+ 63 - 14
arch/ia64/include/asm/spinlock.h

@@ -120,6 +120,38 @@ do {											\
 #define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1,%2\n"
+		"br.few 3f\n"
+		"1:\n"
+		"fetchadd4.rel r2 = [%0], -1;;\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"fetchadd4.acq r2 = [%0], 1;;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 1b\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "p6", "p7", "r2", "memory");
+}
+
+#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+
 #define __raw_read_lock(rw)								\
 do {											\
 	raw_rwlock_t *__read_lock_ptr = (rw);						\
@@ -131,6 +163,8 @@ do {											\
 	}										\
 } while (0)
 
+#endif /* !ASM_SUPPORTED */
+
 #define __raw_read_unlock(rw)					\
 do {								\
 	raw_rwlock_t *__read_lock_ptr = (rw);			\
@@ -138,20 +172,33 @@ do {								\
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw)							\
-do {										\
- 	__asm__ __volatile__ (							\
-		"mov ar.ccv = r0\n"						\
-		"dep r29 = -1, r0, 31, 1;;\n"					\
-		"1:\n"								\
-		"ld4 r2 = [%0];;\n"						\
-		"cmp4.eq p0,p7 = r0,r2\n"					\
-		"(p7) br.cond.spnt.few 1b \n"					\
-		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
-		"cmp4.eq p0,p7 = r0, r2\n"					\
-		"(p7) br.cond.spnt.few 1b;;\n"					\
-		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
-} while(0)
+
+static __always_inline void
+__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1, %2\n"
+		"mov ar.ccv = r0\n"
+		"dep r29 = -1, r0, 31, 1\n"
+		"br.few 3f;;\n"
+		"1:\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 1b;;\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
 
 #define __raw_write_trylock(rw)							\
 ({										\
@@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
+#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+
 #define __raw_write_lock(l)								\
 ({											\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
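
The interesting case is ia64: when ASM_SUPPORTED, the new _flags variants inspect the saved psr.i bit and re-enable interrupts while spinning on a contended lock. A C-level sketch of what the read-lock assembly above does (fetchadd_acq/fetchadd_rel and the counter field are stand-in names, not real kernel helpers):

    static inline void read_lock_flags_sketch(raw_rwlock_t *lock, unsigned long flags)
    {
        int irqs_were_on = flags & IA64_PSR_I;    /* "tbit.nz p6 = %1, IA64_PSR_I_BIT" */

        for (;;) {
            if (fetchadd_acq(&lock->counter, 1) >= 0)
                return;                           /* no writer held it: locked */
            fetchadd_rel(&lock->counter, -1);     /* back out our reader count */
            if (irqs_were_on)
                local_irq_enable();               /* "(p6) ssm psr.i" */
            while (*(volatile int *)&lock->counter < 0)
                cpu_relax();                      /* writer still active: spin */
            if (irqs_were_on)
                local_irq_disable();              /* "(p6) rsm psr.i" before retry */
        }
    }

The plain __raw_read_lock()/__raw_write_lock() are then just the _flags variants called with flags = 0, so they never re-enable interrupts while waiting.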

+ 6 - 0
arch/ia64/include/asm/uv/uv_hub.h

@@ -305,5 +305,11 @@ static inline int uv_num_possible_blades(void)
 	return 1;
 }
 
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+	/* not currently needed on ia64 */
+}
+
+
 #endif /* __ASM_IA64_UV_HUB__ */
 

+ 155 - 3
arch/ia64/include/asm/uv/uv_mmrs.h

@@ -8,8 +8,8 @@
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-#ifndef __ASM_IA64_UV_MMRS__
-#define __ASM_IA64_UV_MMRS__
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H
 
 #define UV_MMR_ENABLE		(1UL << 63)
 
@@ -242,6 +242,158 @@ union uvh_event_occurred0_u {
 #define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
 #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
 
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
 /* ========================================================================= */
 /*                               UVH_INT_CMPB                                */
 /* ========================================================================= */
@@ -670,4 +822,4 @@ union uvh_si_alias2_overlay_config_u {
 };
 
 
-#endif /* __ASM_IA64_UV_MMRS__ */
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
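
These MMR definitions follow one pattern throughout the file: an address #define, SHFT/MASK pairs per field, and a bitfield union over the 64-bit value. A short sketch of both ways to pick a field out of a value read from the chip (uv_read_global_mmr64() is assumed here as the usual UV accessor):

    unsigned long v = uv_read_global_mmr64(pnode, UVH_GR0_TLB_INT0_CONFIG);

    /* via the shift/mask macros */
    unsigned int vec  = (v & UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK)
                            >> UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT;

    /* via the bitfield union */
    union uvh_gr0_tlb_int0_config_u cfg = { .v = v };
    unsigned int vec2 = cfg.s.vector_;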

+ 1 - 1
arch/ia64/kernel/process.c

@@ -413,7 +413,7 @@ ia64_load_extra (struct task_struct *task)
  * so there is nothing to worry about.
  */
 int
-copy_thread (int nr, unsigned long clone_flags,
+copy_thread(unsigned long clone_flags,
 	     unsigned long user_stack_base, unsigned long user_stack_size,
 	     struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/m32r/kernel/process.c

@@ -225,7 +225,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 	return 0; /* Task didn't use the fpu at all. */
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
+int copy_thread(unsigned long clone_flags, unsigned long spu,
 	unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
 {
 	struct pt_regs *childregs = task_pt_regs(tsk);

+ 1 - 1
arch/m68k/kernel/process.c

@@ -233,7 +233,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
 		       parent_tidptr, child_tidptr);
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		 unsigned long unused,
 		 struct task_struct * p, struct pt_regs * regs)
 {

+ 1 - 1
arch/m68knommu/kernel/process.c

@@ -199,7 +199,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
         return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
 }
 
-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
 		unsigned long usp, unsigned long topstk,
 		struct task_struct * p, struct pt_regs * regs)
 {

+ 2 - 0
arch/mips/include/asm/spinlock.h

@@ -480,6 +480,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return ret;
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()

+ 12 - 6
arch/mips/include/asm/unistd.h

@@ -350,16 +350,18 @@
 #define __NR_dup3			(__NR_Linux + 327)
 #define __NR_pipe2			(__NR_Linux + 328)
 #define __NR_inotify_init1		(__NR_Linux + 329)
+#define __NR_preadv			(__NR_Linux + 330)
+#define __NR_pwritev			(__NR_Linux + 331)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		329
+#define __NR_Linux_syscalls		331
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		329
+#define __NR_O32_Linux_syscalls		331
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -656,16 +658,18 @@
 #define __NR_dup3			(__NR_Linux + 286)
 #define __NR_pipe2			(__NR_Linux + 287)
 #define __NR_inotify_init1		(__NR_Linux + 288)
+#define __NR_preadv			(__NR_Linux + 289)
+#define __NR_pwritev			(__NR_Linux + 290)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		288
+#define __NR_Linux_syscalls		290
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		288
+#define __NR_64_Linux_syscalls		290
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -966,16 +970,18 @@
 #define __NR_dup3			(__NR_Linux + 290)
 #define __NR_pipe2			(__NR_Linux + 291)
 #define __NR_inotify_init1		(__NR_Linux + 292)
+#define __NR_preadv			(__NR_Linux + 293)
+#define __NR_pwritev			(__NR_Linux + 294)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		292
+#define __NR_Linux_syscalls		294
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		292
+#define __NR_N32_Linux_syscalls		294
 
 #ifdef __KERNEL__
 

+ 1 - 1
arch/mips/kernel/process.c

@@ -99,7 +99,7 @@ void flush_thread(void)
 {
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
 {
 	struct thread_info *ti = task_thread_info(p);

+ 2 - 0
arch/mips/kernel/scall32-o32.S

@@ -650,6 +650,8 @@ einval:	li	v0, -ENOSYS
 	sys	sys_dup3		3
 	sys	sys_pipe2		2
 	sys	sys_inotify_init1	1
+	sys	sys_preadv		6	/* 4330 */
+	sys	sys_pwritev		6
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to

+ 2 - 0
arch/mips/kernel/scall64-64.S

@@ -487,4 +487,6 @@ sys_call_table:
 	PTR	sys_dup3
 	PTR	sys_pipe2
 	PTR	sys_inotify_init1
+	PTR	sys_preadv
+	PTR	sys_pwritev			/* 5390 */
 	.size	sys_call_table,.-sys_call_table

+ 2 - 0
arch/mips/kernel/scall64-n32.S

@@ -413,4 +413,6 @@ EXPORT(sysn32_call_table)
 	PTR	sys_dup3			/* 5290 */
 	PTR	sys_pipe2
 	PTR	sys_inotify_init1
+	PTR	sys_preadv
+	PTR	sys_pwritev
 	.size	sysn32_call_table,.-sysn32_call_table

+ 2 - 0
arch/mips/kernel/scall64-o32.S

@@ -533,4 +533,6 @@ sys_call_table:
 	PTR	sys_dup3
 	PTR	sys_pipe2
 	PTR	sys_inotify_init1
+	PTR	compat_sys_preadv		/* 4330 */
+	PTR	compat_sys_pwritev
 	.size	sys_call_table,.-sys_call_table
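
The unistd.h numbers and the o32/64/n32 table slots above wire up the new preadv/pwritev system calls on MIPS. From userspace they are the positioned, vectored cousins of readv/writev; a small usage sketch follows (the glibc wrappers appeared around the same time, so older libcs would need syscall() directly, and the file path is arbitrary):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>

    int main(void)
    {
        char hdr[4], body[8];
        struct iovec iov[2] = {
            { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
            { .iov_base = body, .iov_len = sizeof(body) },
        };
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0)
            return 1;
        /* scatter-read 12 bytes from offset 0 without moving the file position */
        ssize_t n = preadv(fd, iov, 2, 0);
        printf("read %zd bytes\n", n);
        return 0;
    }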

+ 1 - 1
arch/mn10300/kernel/process.c

@@ -193,7 +193,7 @@ void prepare_to_copy(struct task_struct *tsk)
  * set up the kernel stack for a new thread and copy arch-specific thread
  * control information
  */
-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
 		unsigned long c_usp, unsigned long ustk_size,
 		struct task_struct *p, struct pt_regs *kregs)
 {

+ 3 - 0
arch/parisc/include/asm/spinlock.h

@@ -187,6 +187,9 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
 	return !rw->counter;
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

+ 1 - 1
arch/parisc/kernel/process.c

@@ -263,7 +263,7 @@ sys_vfork(struct pt_regs *regs)
 }
 
 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+copy_thread(unsigned long clone_flags, unsigned long usp,
 	    unsigned long unused,	/* in ia64 this is "user_stack_size" */
 	    struct task_struct * p, struct pt_regs * pregs)
 {

+ 0 - 10
arch/powerpc/Kconfig.debug

@@ -27,16 +27,6 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
-config DEBUG_PAGEALLOC
-        bool "Debug page memory allocations"
-        depends on DEBUG_KERNEL && !HIBERNATION
-	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
-        help
-          Unmap pages from the kernel linear mapping after free_pages().
-          This results in a large slowdown, but helps to find certain types
-          of memory corruptions.
-
-
 config HCALL_STATS
 	bool "Hypervisor call instrumentation"
 	depends on PPC_PSERIES && DEBUG_FS

+ 3 - 0
arch/powerpc/include/asm/spinlock.h

@@ -287,6 +287,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	rw->lock = 0;
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	__spin_yield(lock)
 #define _raw_read_relax(lock)	__rw_yield(lock)
 #define _raw_write_relax(lock)	__rw_yield(lock)

+ 1 - 1
arch/powerpc/kernel/process.c

@@ -598,7 +598,7 @@ void prepare_to_copy(struct task_struct *tsk)
 /*
  * Copy a thread..
  */
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused, struct task_struct *p,
 		struct pt_regs *regs)
 {

+ 1 - 1
arch/powerpc/kernel/vio.c

@@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work)
 	cmo->excess.size = cmo->entitled - cmo->reserve.size;
 	cmo->excess.free = cmo->excess.size - need;
 
-	cancel_delayed_work(container_of(work, struct delayed_work, work));
+	cancel_delayed_work(to_delayed_work(work));
 	spin_unlock_irqrestore(&vio_cmo.lock, flags);
 }
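
The vio.c change swaps an open-coded container_of() for the generic helper. For reference, to_delayed_work() in <linux/workqueue.h> is exactly that container_of(), so the behaviour is unchanged:

    static inline struct delayed_work *to_delayed_work(struct work_struct *work)
    {
        return container_of(work, struct delayed_work, work);
    }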
 

+ 1 - 1
arch/powerpc/platforms/cell/spufs/inode.c

@@ -635,7 +635,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
 	if (dentry->d_inode)
 		goto out_dput;
 
-	mode &= ~current->fs->umask;
+	mode &= ~current_umask();
 
 	if (flags & SPU_CREATE_GANG)
 		ret = spufs_create_gang(nd->path.dentry->d_inode,

+ 0 - 9
arch/s390/Kconfig.debug

@@ -6,13 +6,4 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
-config DEBUG_PAGEALLOC
-	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL
-	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	help
-	  Unmap pages from the kernel linear mapping after free_pages().
-	  This results in a slowdown, but helps to find certain types of
-	  memory corruptions.
-
 endmenu

+ 3 - 0
arch/s390/include/asm/spinlock.h

@@ -172,6 +172,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 

+ 1 - 1
arch/s390/kernel/process.c

@@ -160,7 +160,7 @@ void release_thread(struct task_struct *dead_task)
 {
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
+int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 3 - 0
arch/sh/include/asm/spinlock.h

@@ -216,6 +216,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

+ 1 - 1
arch/sh/kernel/process_32.c

@@ -170,7 +170,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
 asmlinkage void ret_from_fork(void);
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/sh/kernel/process_64.c

@@ -425,7 +425,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
 asmlinkage void ret_from_fork(void);
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 0 - 9
arch/sparc/Kconfig.debug

@@ -22,15 +22,6 @@ config DEBUG_DCFLUSH
 config STACK_DEBUG
 	bool "Stack Overflow Detection Support"
 
-config DEBUG_PAGEALLOC
-	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL && !HIBERNATION
-	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	help
-	  Unmap pages from the kernel linear mapping after free_pages().
-	  This results in a large slowdown, but helps to find certain types
-	  of memory corruptions.
-
 config MCOUNT
 	bool
 	depends on SPARC64

+ 2 - 0
arch/sparc/include/asm/spinlock_32.h

@@ -177,6 +177,8 @@ static inline int __read_trylock(raw_rwlock_t *rw)
 #define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
+#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
 
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()

+ 2 - 0
arch/sparc/include/asm/spinlock_64.h

@@ -211,9 +211,11 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 }
 
 #define __raw_read_lock(p)	__read_lock(p)
+#define __raw_read_lock_flags(p, f) __read_lock(p)
 #define __raw_read_trylock(p)	__read_trylock(p)
 #define __raw_read_unlock(p)	__read_unlock(p)
 #define __raw_write_lock(p)	__write_lock(p)
+#define __raw_write_lock_flags(p, f) __write_lock(p)
 #define __raw_write_unlock(p)	__write_unlock(p)
 #define __raw_write_trylock(p)	__write_trylock(p)
 

+ 1 - 1
arch/sparc/kernel/process_32.c

@@ -455,7 +455,7 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags,
  */
 extern void ret_from_fork(void);
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/sparc/kernel/process_64.c

@@ -561,7 +561,7 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags,
  * Parent -->  %o0 == childs  pid, %o1 == 0
  * Child  -->  %o0 == parents pid, %o1 == 1
  */
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/um/drivers/net_kern.c

@@ -757,7 +757,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
 	void (*proc)(unsigned char *, unsigned char *, void *);
 	unsigned char addr_buf[4], netmask_buf[4];
 
-	if (dev->open != uml_net_open)
+	if (dev->netdev_ops->ndo_open != uml_net_open)
 		return NOTIFY_DONE;
 
 	lp = netdev_priv(dev);

+ 1 - 1
arch/um/kernel/process.c

@@ -179,7 +179,7 @@ void fork_handler(void)
 	userspace(&current->thread.regs.regs);
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_top, struct task_struct * p,
 		struct pt_regs *regs)
 {

+ 2 - 1
arch/um/kernel/syscall.c

@@ -127,7 +127,8 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[])
 
 	fs = get_fs();
 	set_fs(KERNEL_DS);
-	ret = um_execve(filename, argv, envp);
+	ret = um_execve((char *)filename, (char __user *__user *)argv,
+			(char __user *__user *) envp);
 	set_fs(fs);
 
 	return ret;
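
The added casts only quiet sparse: um_execve() is declared with __user pointers, while kernel_execve() hands it kernel buffers under set_fs(KERNEL_DS). The surrounding construct is the usual pattern for calling a __user-typed helper with kernel pointers (a sketch; some_user_api() is hypothetical):

    #include <asm/uaccess.h>

    static long call_with_kernel_buffer(char *kbuf)
    {
            mm_segment_t old_fs = get_fs();
            long ret;

            set_fs(KERNEL_DS);              /* address-space checks now accept kernel pointers */
            ret = some_user_api((char __user *)kbuf);
            set_fs(old_fs);                 /* always restore the previous limit */
            return ret;
    }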

+ 11 - 0
arch/um/sys-i386/sys_call_table.S

@@ -9,6 +9,17 @@
 
 #define old_mmap old_mmap_i386
 
+#define ptregs_fork sys_fork
+#define ptregs_execve sys_execve
+#define ptregs_iopl sys_iopl
+#define ptregs_vm86old sys_vm86old
+#define ptregs_sigreturn sys_sigreturn
+#define ptregs_clone sys_clone
+#define ptregs_vm86 sys_vm86
+#define ptregs_rt_sigreturn sys_rt_sigreturn
+#define ptregs_sigaltstack sys_sigaltstack
+#define ptregs_vfork sys_vfork
+
 .section .rodata,"a"
 
 #include "../../x86/kernel/syscall_table_32.S"

+ 0 - 9
arch/x86/Kconfig.debug

@@ -72,15 +72,6 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
-config DEBUG_PAGEALLOC
-	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL
-	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	---help---
-	  Unmap pages from the kernel linear mapping after free_pages().
-	  This results in a large slowdown, but helps to find certain types
-	  of memory corruptions.
-
 config DEBUG_PER_CPU_MAPS
 	bool "Debug access to per_cpu maps"
 	depends on DEBUG_KERNEL

+ 2 - 0
arch/x86/ia32/ia32entry.S

@@ -828,4 +828,6 @@ ia32_sys_call_table:
 	.quad sys_dup3			/* 330 */
 	.quad sys_pipe2
 	.quad sys_inotify_init1
+	.quad compat_sys_preadv
+	.quad compat_sys_pwritev
 ia32_syscall_end:

+ 3 - 0
arch/x86/include/asm/spinlock.h

@@ -295,6 +295,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

+ 2 - 0
arch/x86/include/asm/unistd_32.h

@@ -338,6 +338,8 @@
 #define __NR_dup3		330
 #define __NR_pipe2		331
 #define __NR_inotify_init1	332
+#define __NR_preadv		333
+#define __NR_pwritev		334
 
 #ifdef __KERNEL__
 

+ 4 - 0
arch/x86/include/asm/unistd_64.h

@@ -653,6 +653,10 @@ __SYSCALL(__NR_dup3, sys_dup3)
 __SYSCALL(__NR_pipe2, sys_pipe2)
 #define __NR_inotify_init1			294
 __SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_preadv				295
+__SYSCALL(__NR_preadv, sys_preadv)
+#define __NR_pwritev				296
+__SYSCALL(__NR_pwritev, sys_pwritev)
 
 
 #ifndef __NO_STUBS
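
These hunks wire preadv/pwritev into x86: syscall numbers 333/334 on 32-bit (with the matching table entries added to syscall_table_32.S further below), 295/296 on x86-64, plus the ia32 compat entries above. The calls behave like readv/writev with an explicit file offset and do not move the file position. A small, self-contained userspace sketch (via the glibc wrappers where available; an older libc would need syscall(__NR_preadv, ...)):

    #define _GNU_SOURCE
    #include <sys/uio.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            char a[8], b[8];
            struct iovec iov[2] = {
                    { .iov_base = a, .iov_len = sizeof(a) },
                    { .iov_base = b, .iov_len = sizeof(b) },
            };
            int fd = open("/etc/hostname", O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return 1;
            n = preadv(fd, iov, 2, 0);      /* scatter-read 16 bytes from offset 0 */
            if (n >= 0)
                    printf("read %zd bytes\n", n);
            return 0;
    }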

+ 14 - 0
arch/x86/include/asm/uv/uv_hub.h

@@ -11,11 +11,13 @@
 #ifndef _ASM_X86_UV_UV_HUB_H
 #define _ASM_X86_UV_UV_HUB_H
 
+#ifdef CONFIG_X86_64
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
+#include <asm/uv/uv_mmrs.h>
 
 
 /*
@@ -397,6 +399,7 @@ static inline void uv_set_scir_bits(unsigned char value)
 		uv_write_local_mmr8(uv_hub_info->scir.offset, value);
 	}
 }
+
 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 {
 	if (uv_cpu_hub_info(cpu)->scir.state != value) {
@@ -405,4 +408,15 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 	}
 }
 
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+	unsigned long val;
+
+	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+			((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
+			(vector << UVH_IPI_INT_VECTOR_SHFT);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+#endif /* CONFIG_X86_64 */
 #endif /* _ASM_X86_UV_UV_HUB_H */

+ 153 - 0
arch/x86/include/asm/uv/uv_mmrs.h

@@ -1,3 +1,4 @@
+
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -242,6 +243,158 @@ union uvh_event_occurred0_u {
 #define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
 #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
 
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR0_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr0_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT0_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int0_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                         UVH_GR1_TLB_INT1_CONFIG                           */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+    unsigned long	v;
+    struct uvh_gr1_tlb_int1_config_s {
+	unsigned long	vector_  :  8;  /* RW */
+	unsigned long	dm       :  3;  /* RW */
+	unsigned long	destmode :  1;  /* RW */
+	unsigned long	status   :  1;  /* RO */
+	unsigned long	p        :  1;  /* RO */
+	unsigned long	rsvd_14  :  1;  /*    */
+	unsigned long	t        :  1;  /* RO */
+	unsigned long	m        :  1;  /* RW */
+	unsigned long	rsvd_17_31: 15;  /*    */
+	unsigned long	apic_id  : 32;  /* RW */
+    } s;
+};
+
 /* ========================================================================= */
 /*                               UVH_INT_CMPB                                */
 /* ========================================================================= */
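
All four GR0/GR1 TLB interrupt config registers added above share one layout, exposed both as SHFT/MASK constants and as a bitfield union. A hedged sketch of how such a register is typically inspected on UV — uv_read_local_mmr() is the existing accessor from uv_hub.h, while the wrapper function itself is illustrative:

    static void dump_gr0_tlb_int0(void)
    {
            union uvh_gr0_tlb_int0_config_u cfg;

            cfg.v = uv_read_local_mmr(UVH_GR0_TLB_INT0_CONFIG);
            pr_info("GR0 TLB int0: vector=%lu apicid=%lu masked=%lu\n",
                    (unsigned long)cfg.s.vector_,
                    (unsigned long)cfg.s.apic_id,
                    (unsigned long)cfg.s.m);
    }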

+ 2 - 7
arch/x86/kernel/apic/x2apic_uv_x.c

@@ -118,17 +118,12 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
-	unsigned long val, apicid;
+	unsigned long apicid;
 	int pnode;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
 	pnode = uv_apicid_to_pnode(apicid);
-
-	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-	      (vector << UVH_IPI_INT_VECTOR_SHFT);
-
-	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+	uv_hub_send_ipi(pnode, apicid, vector);
 }
 
 static void uv_send_IPI_mask(const struct cpumask *mask, int vector)

+ 1 - 1
arch/x86/kernel/process_32.c

@@ -245,7 +245,7 @@ void prepare_to_copy(struct task_struct *tsk)
 	unlazy_fpu(tsk);
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
 	unsigned long unused,
 	struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/x86/kernel/process_64.c

@@ -278,7 +278,7 @@ void prepare_to_copy(struct task_struct *tsk)
 	unlazy_fpu(tsk);
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
 	struct task_struct *p, struct pt_regs *regs)
 {

+ 1 - 1
arch/x86/kernel/ptrace.c

@@ -1455,6 +1455,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
 	 * system call instruction.
 	 */
 	if (test_thread_flag(TIF_SINGLESTEP) &&
-	    tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
+	    tracehook_consider_fatal_signal(current, SIGTRAP))
 		send_sigtrap(current, regs, 0, TRAP_BRKPT);
 }

+ 2 - 0
arch/x86/kernel/syscall_table_32.S

@@ -332,3 +332,5 @@ ENTRY(sys_call_table)
 	.long sys_dup3			/* 330 */
 	.long sys_pipe2
 	.long sys_inotify_init1
+	.long sys_preadv
+	.long sys_pwritev

+ 1 - 1
arch/xtensa/kernel/process.c

@@ -172,7 +172,7 @@ void prepare_to_copy(struct task_struct *tsk)
  *       childregs.
  */
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
                 struct task_struct * p, struct pt_regs * regs)
 {

+ 8 - 0
drivers/block/floppy.c

@@ -177,6 +177,7 @@ static int print_unex = 1;
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/buffer_head.h>	/* for invalidate_buffers() */
 #include <linux/mutex.h>
 
@@ -4597,6 +4598,13 @@ MODULE_AUTHOR("Alain L. Knaff");
 MODULE_SUPPORTED_DEVICE("fd");
 MODULE_LICENSE("GPL");
 
+/* This doesn't actually get used other than for module information */
+static const struct pnp_device_id floppy_pnpids[] = {
+	{ "PNP0700", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(pnp, floppy_pnpids);
+
 #else
 
 __setup("floppy=", floppy_setup);

+ 71 - 41
drivers/block/nbd.c

@@ -4,7 +4,7 @@
  * Note that you can not swap over this thing, yet. Seems to work but
  * deadlocks sometimes - you can not swap over TCP in general.
  * 
- * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
+ * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz>
  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
  *
  * This file is released under GPLv2 or later.
@@ -276,7 +276,7 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	return 0;
 
 error_out:
-	return 1;
+	return -EIO;
 }
 
 static struct request *nbd_find_request(struct nbd_device *lo,
@@ -467,9 +467,7 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 		mutex_unlock(&lo->tx_lock);
 		printk(KERN_ERR "%s: Attempted send on closed socket\n",
 		       lo->disk->disk_name);
-		req->errors++;
-		nbd_end_request(req);
-		return;
+		goto error_out;
 	}
 
 	lo->active_req = req;
@@ -531,7 +529,7 @@ static int nbd_thread(void *data)
  *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
 
-static void do_nbd_request(struct request_queue * q)
+static void do_nbd_request(struct request_queue *q)
 {
 	struct request *req;
 	
@@ -568,27 +566,17 @@ static void do_nbd_request(struct request_queue * q)
 	}
 }
 
-static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
-		     unsigned int cmd, unsigned long arg)
-{
-	struct nbd_device *lo = bdev->bd_disk->private_data;
-	struct file *file;
-	int error;
-	struct request sreq ;
-	struct task_struct *thread;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	BUG_ON(lo->magic != LO_MAGIC);
-
-	/* Anyone capable of this syscall can do *real bad* things */
-	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+/* Must be called with tx_lock held */
 
+static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+		       unsigned int cmd, unsigned long arg)
+{
 	switch (cmd) {
-	case NBD_DISCONNECT:
+	case NBD_DISCONNECT: {
+		struct request sreq;
+
 	        printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+
 		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
@@ -599,29 +587,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		 */
 		sreq.sector = 0;
 		sreq.nr_sectors = 0;
-                if (!lo->sock)
+		if (!lo->sock)
 			return -EINVAL;
-		mutex_lock(&lo->tx_lock);
-                nbd_send_req(lo, &sreq);
-		mutex_unlock(&lo->tx_lock);
+		nbd_send_req(lo, &sreq);
                 return 0;
+	}
  
-	case NBD_CLEAR_SOCK:
-		error = 0;
-		mutex_lock(&lo->tx_lock);
+	case NBD_CLEAR_SOCK: {
+		struct file *file;
+
 		lo->sock = NULL;
-		mutex_unlock(&lo->tx_lock);
 		file = lo->file;
 		lo->file = NULL;
 		nbd_clear_que(lo);
 		BUG_ON(!list_empty(&lo->queue_head));
 		if (file)
 			fput(file);
-		return error;
-	case NBD_SET_SOCK:
+		return 0;
+	}
+
+	case NBD_SET_SOCK: {
+		struct file *file;
 		if (lo->file)
 			return -EBUSY;
-		error = -EINVAL;
 		file = fget(arg);
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -630,12 +618,14 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 				lo->sock = SOCKET_I(inode);
 				if (max_part > 0)
 					bdev->bd_invalidated = 1;
-				error = 0;
+				return 0;
 			} else {
 				fput(file);
 			}
 		}
-		return error;
+		return -EINVAL;
+	}
+
 	case NBD_SET_BLKSIZE:
 		lo->blksize = arg;
 		lo->bytesize &= ~(lo->blksize-1);
@@ -643,35 +633,50 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
+
 	case NBD_SET_SIZE:
 		lo->bytesize = arg & ~(lo->blksize-1);
 		bdev->bd_inode->i_size = lo->bytesize;
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
+
 	case NBD_SET_TIMEOUT:
 		lo->xmit_timeout = arg * HZ;
 		return 0;
+
 	case NBD_SET_SIZE_BLOCKS:
 		lo->bytesize = ((u64) arg) * lo->blksize;
 		bdev->bd_inode->i_size = lo->bytesize;
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
-	case NBD_DO_IT:
+
+	case NBD_DO_IT: {
+		struct task_struct *thread;
+		struct file *file;
+		int error;
+
 		if (lo->pid)
 			return -EBUSY;
 		if (!lo->file)
 			return -EINVAL;
+
+		mutex_unlock(&lo->tx_lock);
+
 		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
-		if (IS_ERR(thread))
+		if (IS_ERR(thread)) {
+			mutex_lock(&lo->tx_lock);
 			return PTR_ERR(thread);
+		}
 		wake_up_process(thread);
 		error = nbd_do_it(lo);
 		kthread_stop(thread);
+
+		mutex_lock(&lo->tx_lock);
 		if (error)
 			return error;
-		sock_shutdown(lo, 1);
+		sock_shutdown(lo, 0);
 		file = lo->file;
 		lo->file = NULL;
 		nbd_clear_que(lo);
@@ -684,6 +689,8 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		if (max_part > 0)
 			ioctl_by_bdev(bdev, BLKRRPART, 0);
 		return lo->harderror;
+	}
+
 	case NBD_CLEAR_QUE:
 		/*
 		 * This is for compatibility only.  The queue is always cleared
@@ -691,6 +698,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		 */
 		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
 		return 0;
+
 	case NBD_PRINT_DEBUG:
 		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
 			bdev->bd_disk->disk_name,
@@ -698,7 +706,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 			&lo->queue_head);
 		return 0;
 	}
-	return -EINVAL;
+	return -ENOTTY;
+}
+
+static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct nbd_device *lo = bdev->bd_disk->private_data;
+	int error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	BUG_ON(lo->magic != LO_MAGIC);
+
+	/* Anyone capable of this syscall can do *real bad* things */
+	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
+			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+
+	mutex_lock(&lo->tx_lock);
+	error = __nbd_ioctl(bdev, lo, cmd, arg);
+	mutex_unlock(&lo->tx_lock);
+
+	return error;
 }
 
 static struct block_device_operations nbd_fops =
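
The nbd rework above is the classic "thin locked wrapper around an unlocked worker" refactor: nbd_ioctl() now takes tx_lock exactly once, __nbd_ioctl() documents that it must be called with the lock held, and the long-running NBD_DO_IT case drops and retakes the mutex around nbd_do_it(). The shape of the pattern, reduced to a sketch with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct my_dev {                         /* hypothetical device state */
            struct mutex lock;
    };

    /* Must be called with dev->lock held. */
    static int __my_ctl(struct my_dev *dev, unsigned int cmd, unsigned long arg)
    {
            switch (cmd) {
            /* ... per-command handling, no locking in here ... */
            default:
                    return -ENOTTY;         /* unknown ioctl: -ENOTTY, not -EINVAL */
            }
    }

    static int my_ctl(struct my_dev *dev, unsigned int cmd, unsigned long arg)
    {
            int err;

            mutex_lock(&dev->lock);
            err = __my_ctl(dev, cmd, arg);
            mutex_unlock(&dev->lock);
            return err;
    }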

+ 21 - 1
drivers/char/hpet.c

@@ -713,7 +713,7 @@ static struct ctl_table_header *sysctl_header;
  */
 #define	TICK_CALIBRATE	(1000UL)
 
-static unsigned long hpet_calibrate(struct hpets *hpetp)
+static unsigned long __hpet_calibrate(struct hpets *hpetp)
 {
 	struct hpet_timer __iomem *timer = NULL;
 	unsigned long t, m, count, i, flags, start;
@@ -750,6 +750,26 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
 	return (m - start) / i;
 }
 
+static unsigned long hpet_calibrate(struct hpets *hpetp)
+{
+	unsigned long ret = -1;
+	unsigned long tmp;
+
+	/*
+	 * Try to calibrate until return value becomes stable small value.
+	 * If SMI interruption occurs in calibration loop, the return value
+	 * will be big. This avoids its impact.
+	 */
+	for ( ; ; ) {
+		tmp = __hpet_calibrate(hpetp);
+		if (ret <= tmp)
+			break;
+		ret = tmp;
+	}
+
+	return ret;
+}
+
 int hpet_alloc(struct hpet_data *hdp)
 {
 	u64 cap, mcfg;
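
The new hpet_calibrate() wrapper keeps re-running __hpet_calibrate() and remembers the smallest result, stopping as soon as a sample is no better than the best seen so far; initialising ret to -1 (ULONG_MAX for an unsigned long) guarantees the first sample is always accepted. The same "take the minimum until it stops improving" idea as a standalone sketch, where measure() stands in for __hpet_calibrate():

    unsigned long calibrate_stable(unsigned long (*measure)(void))
    {
            unsigned long best = -1UL;      /* ULONG_MAX, so the first sample always wins */
            unsigned long sample;

            for (;;) {
                    sample = measure();
                    if (best <= sample)     /* no improvement: previous minimum is stable */
                            break;
                    best = sample;
            }
            return best;
    }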

+ 2 - 1
drivers/char/random.c

@@ -1488,7 +1488,8 @@ static void rekey_seq_generator(struct work_struct *work)
 	keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
 	smp_wmb();
 	ip_cnt++;
-	schedule_delayed_work(&rekey_work, REKEY_INTERVAL);
+	schedule_delayed_work(&rekey_work,
+			      round_jiffies_relative(REKEY_INTERVAL));
 }
 
 static inline struct keydata *get_keyptr(void)
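
round_jiffies_relative() rounds a relative timeout so that the timer expires on a whole-second boundary, letting unrelated timers batch their wakeups; for a coarse periodic job like the sequence-number rekey the loss of precision is irrelevant. Sketch of the scheduling pattern for a hypothetical periodic worker:

    #include <linux/workqueue.h>
    #include <linux/timer.h>                /* round_jiffies_relative() */

    #define MY_INTERVAL (300 * HZ)          /* hypothetical 5 minute period */

    static void my_periodic(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_work, my_periodic);

    static void my_periodic(struct work_struct *work)
    {
            /* ... do the periodic job ... */
            schedule_delayed_work(&my_work, round_jiffies_relative(MY_INTERVAL));
    }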

+ 38 - 20
drivers/char/synclink_gt.c

@@ -298,6 +298,7 @@ struct slgt_info {
 
 	unsigned int rbuf_fill_level;
 	unsigned int if_mode;
+	unsigned int base_clock;
 
 	/* device status */
 
@@ -1156,22 +1157,26 @@ static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *ne
 		return -EFAULT;
 
 	spin_lock(&info->lock);
-	info->params.mode            = tmp_params.mode;
-	info->params.loopback        = tmp_params.loopback;
-	info->params.flags           = tmp_params.flags;
-	info->params.encoding        = tmp_params.encoding;
-	info->params.clock_speed     = tmp_params.clock_speed;
-	info->params.addr_filter     = tmp_params.addr_filter;
-	info->params.crc_type        = tmp_params.crc_type;
-	info->params.preamble_length = tmp_params.preamble_length;
-	info->params.preamble        = tmp_params.preamble;
-	info->params.data_rate       = tmp_params.data_rate;
-	info->params.data_bits       = tmp_params.data_bits;
-	info->params.stop_bits       = tmp_params.stop_bits;
-	info->params.parity          = tmp_params.parity;
+	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
+		info->base_clock = tmp_params.clock_speed;
+	} else {
+		info->params.mode            = tmp_params.mode;
+		info->params.loopback        = tmp_params.loopback;
+		info->params.flags           = tmp_params.flags;
+		info->params.encoding        = tmp_params.encoding;
+		info->params.clock_speed     = tmp_params.clock_speed;
+		info->params.addr_filter     = tmp_params.addr_filter;
+		info->params.crc_type        = tmp_params.crc_type;
+		info->params.preamble_length = tmp_params.preamble_length;
+		info->params.preamble        = tmp_params.preamble;
+		info->params.data_rate       = tmp_params.data_rate;
+		info->params.data_bits       = tmp_params.data_bits;
+		info->params.stop_bits       = tmp_params.stop_bits;
+		info->params.parity          = tmp_params.parity;
+	}
 	spin_unlock(&info->lock);
 
- 	change_params(info);
+	program_hw(info);
 
 	return 0;
 }
@@ -2559,10 +2564,13 @@ static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
 		return -EFAULT;
 
 	spin_lock_irqsave(&info->lock, flags);
-	memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
+	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
+		info->base_clock = tmp_params.clock_speed;
+	else
+		memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
 	spin_unlock_irqrestore(&info->lock, flags);
 
- 	change_params(info);
+	program_hw(info);
 
 	return 0;
 }
@@ -3432,6 +3440,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
 		info->magic = MGSL_MAGIC;
 		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
+		info->base_clock = 14745600;
 		info->rbuf_fill_level = DMABUFSIZE;
 		info->port.close_delay = 5*HZ/10;
 		info->port.closing_wait = 30*HZ;
@@ -3779,7 +3788,7 @@ static void enable_loopback(struct slgt_info *info)
 static void set_rate(struct slgt_info *info, u32 rate)
 {
 	unsigned int div;
-	static unsigned int osc = 14745600;
+	unsigned int osc = info->base_clock;
 
 	/* div = osc/rate - 1
 	 *
@@ -4083,18 +4092,27 @@ static void async_mode(struct slgt_info *info)
 	 * 06  CTS      IRQ enable
 	 * 05  DCD      IRQ enable
 	 * 04  RI       IRQ enable
-	 * 03  reserved, must be zero
+	 * 03  0=16x sampling, 1=8x sampling
 	 * 02  1=txd->rxd internal loopback enable
 	 * 01  reserved, must be zero
 	 * 00  1=master IRQ enable
 	 */
 	val = BIT15 + BIT14 + BIT0;
+	/* JCR[8] : 1 = x8 async mode feature available */
+	if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
+	    ((info->base_clock < (info->params.data_rate * 16)) ||
+	     (info->base_clock % (info->params.data_rate * 16)))) {
+		/* use 8x sampling */
+		val |= BIT3;
+		set_rate(info, info->params.data_rate * 8);
+	} else {
+		/* use 16x sampling */
+		set_rate(info, info->params.data_rate * 16);
+	}
 	wr_reg16(info, SCR, val);
 
 	slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
 
-	set_rate(info, info->params.data_rate * 16);
-
 	if (info->params.loopback)
 		enable_loopback(info);
 }
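
Taken together, these hunks replace the hard-coded 14.7456 MHz oscillator with a per-port base_clock, add selectable 8x/16x async sampling, and give userspace a way to declare a different fitted oscillator: a set-params call whose mode is MGSL_MODE_BASE_CLOCK carries the frequency in clock_speed and leaves all other parameters alone. A hedged userspace sketch — the device path and frequency are illustrative, while MGSL_PARAMS and MGSL_IOCSPARAMS come from linux/synclink.h:

    #include <linux/synclink.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int set_base_clock(const char *dev, unsigned int hz)
    {
            MGSL_PARAMS params;
            int fd, ret;

            fd = open(dev, O_RDWR);
            if (fd < 0)
                    return -1;
            memset(&params, 0, sizeof(params));
            params.mode = MGSL_MODE_BASE_CLOCK;     /* only clock_speed is consumed */
            params.clock_speed = hz;                /* e.g. a 32 MHz replacement oscillator */
            ret = ioctl(fd, MGSL_IOCSPARAMS, &params);
            close(fd);
            return ret;
    }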

+ 0 - 2
drivers/char/tty_audit.c

@@ -10,8 +10,6 @@
  */
 
 #include <linux/audit.h>
-#include <linux/file.h>
-#include <linux/fdtable.h>
 #include <linux/tty.h>
 
 struct tty_audit_buf {

+ 3 - 3
drivers/char/tty_io.c

@@ -1758,7 +1758,7 @@ static int __tty_open(struct inode *inode, struct file *filp)
 	struct tty_driver *driver;
 	int index;
 	dev_t device = inode->i_rdev;
-	unsigned short saved_flags = filp->f_flags;
+	unsigned saved_flags = filp->f_flags;
 
 	nonseekable_open(inode, filp);
 
@@ -2681,7 +2681,7 @@ void __do_SAK(struct tty_struct *tty)
 	/* Kill the entire session */
 	do_each_pid_task(session, PIDTYPE_SID, p) {
 		printk(KERN_NOTICE "SAK: killed process %d"
-			" (%s): task_session_nr(p)==tty->session\n",
+			" (%s): task_session(p)==tty->session\n",
 			task_pid_nr(p), p->comm);
 		send_sig(SIGKILL, p, 1);
 	} while_each_pid_task(session, PIDTYPE_SID, p);
@@ -2691,7 +2691,7 @@ void __do_SAK(struct tty_struct *tty)
 	do_each_thread(g, p) {
 		if (p->signal->tty == tty) {
 			printk(KERN_NOTICE "SAK: killed process %d"
-			    " (%s): task_session_nr(p)==tty->session\n",
+			    " (%s): task_session(p)==tty->session\n",
 			    task_pid_nr(p), p->comm);
 			send_sig(SIGKILL, p, 1);
 			continue;

+ 0 - 1
drivers/char/tty_ldisc.c

@@ -10,7 +10,6 @@
 #include <linux/tty_flip.h>
 #include <linux/devpts_fs.h>
 #include <linux/file.h>
-#include <linux/fdtable.h>
 #include <linux/console.h>
 #include <linux/timer.h>
 #include <linux/ctype.h>

+ 1 - 1
drivers/crypto/hifn_795x.c

@@ -1882,7 +1882,7 @@ static void hifn_clear_rings(struct hifn_device *dev, int error)
 
 static void hifn_work(struct work_struct *work)
 {
-	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct delayed_work *dw = to_delayed_work(work);
 	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
 	unsigned long flags;
 	int reset = 0;

+ 33 - 3
drivers/edac/Kconfig

@@ -1,13 +1,12 @@
 #
 #	EDAC Kconfig
-#	Copyright (c) 2003 Linux Networx
+#	Copyright (c) 2008 Doug Thompson www.softwarebitmaker.com
 #	Licensed and distributed under the GPL
 #
 
 menuconfig EDAC
-	bool "EDAC - error detection and reporting (EXPERIMENTAL)"
+	bool "EDAC - error detection and reporting"
 	depends on HAS_IOMEM
-	depends on EXPERIMENTAL
 	depends on X86 || PPC
 	help
 	  EDAC is designed to report errors in the core system.
@@ -40,6 +39,14 @@ config EDAC_DEBUG
 	  there're four debug levels (x=0,1,2,3 from low to high).
 	  Usually you should select 'N'.
 
+config EDAC_DEBUG_VERBOSE
+	bool "More verbose debugging"
+	depends on EDAC_DEBUG
+	help
+	  This option makes debugging information more verbose.
+	  Source file name and line number where the debugging message
+	  was printed will be added to the message.
+
 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
 	default y
@@ -174,4 +181,27 @@ config EDAC_CELL
 	  Cell Broadband Engine internal memory controller
 	  on platform without a hypervisor
 
+config EDAC_PPC4XX
+	tristate "PPC4xx IBM DDR2 Memory Controller"
+	depends on EDAC_MM_EDAC && 4xx
+	help
+	  This enables support for EDAC on the ECC memory used
+	  with the IBM DDR2 memory controller found in various
+	  PowerPC 4xx embedded processors such as the 405EX[r],
+	  440SP, 440SPe, 460EX, 460GT and 460SX.
+
+config EDAC_AMD8131
+	tristate "AMD8131 HyperTransport PCI-X Tunnel"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the
+	  AMD8131 HyperTransport PCI-X Tunnel chip.
+
+config EDAC_AMD8111
+	tristate "AMD8111 HyperTransport I/O Hub"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the
+	  AMD8111 HyperTransport I/O Hub chip.
+
 endif # EDAC

+ 1 - 1
drivers/edac/Makefile

@@ -34,4 +34,4 @@ obj-$(CONFIG_EDAC_PASEMI)		+= pasemi_edac.o
 obj-$(CONFIG_EDAC_MPC85XX)		+= mpc85xx_edac.o
 obj-$(CONFIG_EDAC_MV64X60)		+= mv64x60_edac.o
 obj-$(CONFIG_EDAC_CELL)			+= cell_edac.o
-
+obj-$(CONFIG_EDAC_PPC4XX)		+= ppc4xx_edac.o

+ 595 - 0
drivers/edac/amd8111_edac.c

@@ -0,0 +1,595 @@
+/*
+ * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/pci_ids.h>
+#include <asm/io.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "amd8111_edac.h"
+
+#define AMD8111_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define AMD8111_EDAC_MOD_STR	"amd8111_edac"
+
+#define PCI_DEVICE_ID_AMD_8111_PCI	0x7460
+static int edac_dev_idx;
+
+enum amd8111_edac_devs {
+	LPC_BRIDGE = 0,
+};
+
+enum amd8111_edac_pcis {
+	PCI_BRIDGE = 0,
+};
+
+/* Wrapper functions for accessing PCI configuration space */
+static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
+{
+	int ret;
+
+	ret = pci_read_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+
+	return ret;
+}
+
+static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
+{
+	int ret;
+
+	ret = pci_read_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
+{
+	int ret;
+
+	ret = pci_write_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
+{
+	int ret;
+
+	ret = pci_write_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+/*
+ * device-specific methods for amd8111 PCI Bridge Controller
+ *
+ * Error Reporting and Handling for amd8111 chipset could be found
+ * in its datasheet 3.1.2 section, P37
+ */
+static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	/* First clear error detection flags on the host interface */
+
+	/* Clear SSE/SMA/STA flags in the global status register*/
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+	/* Clear CRC and Link Fail flags in HT Link Control reg */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+	/* Second clear all fault on the secondary interface */
+
+	/* Clear error flags in the memory-base limit reg. */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+	/* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+	/* Last enable error detections */
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Enable System Error reporting in global status register */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 |= PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Enable CRC Sync flood packets to HyperTransport Link */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 |= HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Enable SSE reporting etc in Interrupt control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 |= PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Disable System Error reporting */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 &= ~PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Disable CRC flood packets */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 &= ~HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
+{
+	struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
+	struct pci_dev *dev = pci_info->dev;
+	u32 val32;
+
+	/* Check out PCI Bridge Status and Command Register */
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK) {
+		printk(KERN_INFO "Error(s) in PCI bridge status and command"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
+			(val32 & PCI_STSCMD_SSE) != 0,
+			(val32 & PCI_STSCMD_RMA) != 0,
+			(val32 & PCI_STSCMD_RTA) != 0);
+
+		val32 |= PCI_STSCMD_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out HyperTransport Link Control Register */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_LKFAIL) {
+		printk(KERN_INFO "Error(s) in hypertransport link control"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "LKFAIL: %d\n",
+			(val32 & HT_LINK_LKFAIL) != 0);
+
+		val32 |= HT_LINK_LKFAIL;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Interrupt and Bridge Control Register */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
+		printk(KERN_INFO "Error(s) in PCI interrupt and bridge control"
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "DTSTAT: %d\n",
+			(val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
+
+		val32 |= PCI_INTBRG_CTRL_DTSTAT;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Bridge Memory Base-Limit Register */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in mem limit register on %s device\n",
+			pci_info->ctl_name);
+		printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
+			"RTA: %d, STA: %d, MDPE: %d\n",
+			(val32 & MEM_LIMIT_DPE)  != 0,
+			(val32 & MEM_LIMIT_RSE)  != 0,
+			(val32 & MEM_LIMIT_RMA)  != 0,
+			(val32 & MEM_LIMIT_RTA)  != 0,
+			(val32 & MEM_LIMIT_STA)  != 0,
+			(val32 & MEM_LIMIT_MDPE) != 0);
+
+		val32 |= MEM_LIMIT_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+}
+
+static struct resource *legacy_io_res;
+static int at_compat_reg_broken;
+#define LEGACY_NR_PORTS	1
+
+/* device-specific methods for amd8111 LPC Bridge device */
+static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
+{
+	u8 val8;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
+	legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
+					AMD8111_EDAC_MOD_STR);
+	if (!legacy_io_res)
+		printk(KERN_INFO "%s: failed to request legacy I/O region "
+			"start %d, len %d\n", __func__,
+			REG_AT_COMPAT, LEGACY_NR_PORTS);
+	else {
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 == 0xff) { /* buggy port */
+			printk(KERN_INFO "%s: port %d is buggy, not supported"
+				" by hardware?\n", __func__, REG_AT_COMPAT);
+			at_compat_reg_broken = 1;
+			release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+			legacy_io_res = NULL;
+		} else {
+			u8 out8 = 0;
+			if (val8 & AT_COMPAT_SERR)
+				out8 = AT_COMPAT_CLRSERR;
+			if (val8 & AT_COMPAT_IOCHK)
+				out8 |= AT_COMPAT_CLRIOCHK;
+			if (out8 > 0)
+				__do_outb(out8, REG_AT_COMPAT);
+		}
+	}
+
+	/* Second clear error flags on LPC bridge */
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK)
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+}
+
+static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
+{
+	if (legacy_io_res)
+		release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+}
+
+static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
+	struct pci_dev *dev = dev_info->dev;
+	u8 val8;
+
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in IO control register on %s device\n",
+			dev_info->ctl_name);
+		printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
+			(val8 & IO_CTRL_1_LPC_ERR) != 0,
+			(val8 & IO_CTRL_1_PW2LPC) != 0);
+
+		val8 |= IO_CTRL_1_CLEAR_MASK;
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+
+		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+	}
+
+	if (at_compat_reg_broken == 0) {
+		u8 out8 = 0;
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 & AT_COMPAT_SERR)
+			out8 = AT_COMPAT_CLRSERR;
+		if (val8 & AT_COMPAT_IOCHK)
+			out8 |= AT_COMPAT_CLRIOCHK;
+		if (out8 > 0) {
+			__do_outb(out8, REG_AT_COMPAT);
+			edac_device_handle_ue(edac_dev, 0, 0,
+						edac_dev->ctl_name);
+		}
+	}
+}
+
+/* General devices represented by edac_device_ctl_info */
+static struct amd8111_dev_info amd8111_devices[] = {
+	[LPC_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
+		.ctl_name = "lpc",
+		.init = amd8111_lpc_bridge_init,
+		.exit = amd8111_lpc_bridge_exit,
+		.check = amd8111_lpc_bridge_check,
+	},
+	{0},
+};
+
+/* PCI controllers represented by edac_pci_ctl_info */
+static struct amd8111_pci_info amd8111_pcis[] = {
+	[PCI_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
+		.ctl_name = "AMD8111_PCI_Controller",
+		.init = amd8111_pci_bridge_init,
+		.exit = amd8111_pci_bridge_exit,
+		.check = amd8111_pci_bridge_check,
+	},
+	{0},
+};
+
+static int amd8111_dev_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
+
+	dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					dev_info->err_dev, NULL);
+
+	if (!dev_info->dev) {
+		printk(KERN_ERR "EDAC device not found:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(dev_info->dev)) {
+		pci_dev_put(dev_info->dev);
+		printk(KERN_ERR "failed to enable:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * we do not allocate extra private structure for
+	 * edac_device_ctl_info, but make use of existing
+	 * one instead.
+	*/
+	dev_info->edac_idx = edac_dev_idx++;
+	dev_info->edac_dev =
+		edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
+					   NULL, 0, 0,
+					   NULL, 0, dev_info->edac_idx);
+	if (!dev_info->edac_dev)
+		return -ENOMEM;
+
+	dev_info->edac_dev->pvt_info = dev_info;
+	dev_info->edac_dev->dev = &dev_info->dev->dev;
+	dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+	dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		dev_info->edac_dev->edac_check = dev_info->check;
+
+	if (dev_info->init)
+		dev_info->init(dev_info);
+
+	if (edac_device_add_device(dev_info->edac_dev) > 0) {
+		printk(KERN_ERR "failed to add edac_dev for %s\n",
+			dev_info->ctl_name);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_dev on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, dev_info->err_dev,
+		dev_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_dev_remove(struct pci_dev *dev)
+{
+	struct amd8111_dev_info *dev_info;
+
+	for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
+		if (dev_info->dev->device == dev->device)
+			break;
+
+	if (!dev_info->err_dev)	/* should never happen */
+		return;
+
+	if (dev_info->edac_dev) {
+		edac_device_del_device(dev_info->edac_dev->dev);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+	}
+
+	if (dev_info->exit)
+		dev_info->exit(dev_info);
+
+	pci_dev_put(dev_info->dev);
+}
+
+static int amd8111_pci_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
+
+	pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					pci_info->err_dev, NULL);
+
+	if (!pci_info->dev) {
+		printk(KERN_ERR "EDAC device not found:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(pci_info->dev)) {
+		pci_dev_put(pci_info->dev);
+		printk(KERN_ERR "failed to enable:"
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * we do not allocate extra private structure for
+	 * edac_pci_ctl_info, but make use of existing
+	 * one instead.
+	*/
+	pci_info->edac_idx = edac_pci_alloc_index();
+	pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
+	if (!pci_info->edac_dev)
+		return -ENOMEM;
+
+	pci_info->edac_dev->pvt_info = pci_info;
+	pci_info->edac_dev->dev = &pci_info->dev->dev;
+	pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	pci_info->edac_dev->ctl_name = pci_info->ctl_name;
+	pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		pci_info->edac_dev->edac_check = pci_info->check;
+
+	if (pci_info->init)
+		pci_info->init(pci_info);
+
+	if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
+		printk(KERN_ERR "failed to add edac_pci for %s\n",
+			pci_info->ctl_name);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_pci on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, pci_info->err_dev,
+		pci_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_pci_remove(struct pci_dev *dev)
+{
+	struct amd8111_pci_info *pci_info;
+
+	for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
+		if (pci_info->dev->device == dev->device)
+			break;
+
+	if (!pci_info->err_dev)	/* should never happen */
+		return;
+
+	if (pci_info->edac_dev) {
+		edac_pci_del_device(pci_info->edac_dev->dev);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+	}
+
+	if (pci_info->exit)
+		pci_info->exit(pci_info);
+
+	pci_dev_put(pci_info->dev);
+}
+
+/* PCI Device ID table for general EDAC device */
+static const struct pci_device_id amd8111_edac_dev_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_LPC),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = LPC_BRIDGE,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
+
+static struct pci_driver amd8111_edac_dev_driver = {
+	.name = "AMD8111_EDAC_DEV",
+	.probe = amd8111_dev_probe,
+	.remove = amd8111_dev_remove,
+	.id_table = amd8111_edac_dev_tbl,
+};
+
+/* PCI Device ID table for EDAC PCI controller */
+static const struct pci_device_id amd8111_edac_pci_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_PCI),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = PCI_BRIDGE,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
+
+static struct pci_driver amd8111_edac_pci_driver = {
+	.name = "AMD8111_EDAC_PCI",
+	.probe = amd8111_pci_probe,
+	.remove = amd8111_pci_remove,
+	.id_table = amd8111_edac_pci_tbl,
+};
+
+static int __init amd8111_edac_init(void)
+{
+	int val;
+
+	printk(KERN_INFO "AMD8111 EDAC driver "	AMD8111_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
+
+	/* Only POLL mode supported so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	val = pci_register_driver(&amd8111_edac_dev_driver);
+	val |= pci_register_driver(&amd8111_edac_pci_driver);
+
+	return val;
+}
+
+static void __exit amd8111_edac_exit(void)
+{
+	pci_unregister_driver(&amd8111_edac_pci_driver);
+	pci_unregister_driver(&amd8111_edac_dev_driver);
+}
+
+
+module_init(amd8111_edac_init);
+module_exit(amd8111_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");

+ 130 - 0
drivers/edac/amd8111_edac.h

@@ -0,0 +1,130 @@
+/*
+ * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AMD8111_EDAC_H_
+#define _AMD8111_EDAC_H_
+
+/************************************************************
+ *	PCI Bridge Status and Command Register, DevA:0x04
+ ************************************************************/
+#define REG_PCI_STSCMD	0x04
+enum pci_stscmd_bits {
+	PCI_STSCMD_SSE		= BIT(30),
+	PCI_STSCMD_RMA		= BIT(29),
+	PCI_STSCMD_RTA		= BIT(28),
+	PCI_STSCMD_SERREN	= BIT(8),
+	PCI_STSCMD_CLEAR_MASK	= (PCI_STSCMD_SSE |
+				   PCI_STSCMD_RMA |
+				   PCI_STSCMD_RTA)
+};
+
+/************************************************************
+ *	PCI Bridge Memory Base-Limit Register, DevA:0x1c
+ ************************************************************/
+#define REG_MEM_LIM     0x1c
+enum mem_limit_bits {
+	MEM_LIMIT_DPE   = BIT(31),
+	MEM_LIMIT_RSE   = BIT(30),
+	MEM_LIMIT_RMA   = BIT(29),
+	MEM_LIMIT_RTA   = BIT(28),
+	MEM_LIMIT_STA   = BIT(27),
+	MEM_LIMIT_MDPE  = BIT(24),
+	MEM_LIMIT_CLEAR_MASK  = (MEM_LIMIT_DPE |
+				 MEM_LIMIT_RSE |
+				 MEM_LIMIT_RMA |
+				 MEM_LIMIT_RTA |
+				 MEM_LIMIT_STA |
+				 MEM_LIMIT_MDPE)
+};
+
+/************************************************************
+ *	HyperTransport Link Control Register, DevA:0xc4
+ ************************************************************/
+#define REG_HT_LINK	0xc4
+enum ht_link_bits {
+	HT_LINK_LKFAIL	= BIT(4),
+	HT_LINK_CRCFEN	= BIT(1),
+	HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL)
+};
+
+/************************************************************
+ *	PCI Bridge Interrupt and Bridge Control, DevA:0x3c
+ ************************************************************/
+#define REG_PCI_INTBRG_CTRL	0x3c
+enum pci_intbrg_ctrl_bits {
+	PCI_INTBRG_CTRL_DTSERREN	= BIT(27),
+	PCI_INTBRG_CTRL_DTSTAT		= BIT(26),
+	PCI_INTBRG_CTRL_MARSP		= BIT(21),
+	PCI_INTBRG_CTRL_SERREN		= BIT(17),
+	PCI_INTBRG_CTRL_PEREN		= BIT(16),
+	PCI_INTBRG_CTRL_CLEAR_MASK	= (PCI_INTBRG_CTRL_DTSTAT),
+	PCI_INTBRG_CTRL_POLL_MASK	= (PCI_INTBRG_CTRL_DTSERREN |
+					   PCI_INTBRG_CTRL_MARSP |
+					   PCI_INTBRG_CTRL_SERREN)
+};
+
+/************************************************************
+ *		I/O Control 1 Register, DevB:0x40
+ ************************************************************/
+#define REG_IO_CTRL_1 0x40
+enum io_ctrl_1_bits {
+	IO_CTRL_1_NMIONERR	= BIT(7),
+	IO_CTRL_1_LPC_ERR	= BIT(6),
+	IO_CTRL_1_PW2LPC	= BIT(1),
+	IO_CTRL_1_CLEAR_MASK	= (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
+};
+
+/************************************************************
+ *		Legacy I/O Space Registers
+ ************************************************************/
+#define REG_AT_COMPAT 0x61
+enum at_compat_bits {
+	AT_COMPAT_SERR		= BIT(7),
+	AT_COMPAT_IOCHK		= BIT(6),
+	AT_COMPAT_CLRIOCHK	= BIT(3),
+	AT_COMPAT_CLRSERR	= BIT(2),
+};
+
+struct amd8111_dev_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* device index */
+	char *ctl_name;
+	struct edac_device_ctl_info *edac_dev;
+	void (*init)(struct amd8111_dev_info *dev_info);
+	void (*exit)(struct amd8111_dev_info *dev_info);
+	void (*check)(struct edac_device_ctl_info *edac_dev);
+};
+
+struct amd8111_pci_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* pci index */
+	const char *ctl_name;
+	struct edac_pci_ctl_info *edac_dev;
+	void (*init)(struct amd8111_pci_info *dev_info);
+	void (*exit)(struct amd8111_pci_info *dev_info);
+	void (*check)(struct edac_pci_ctl_info *edac_dev);
+};
+
+#endif /* _AMD8111_EDAC_H_ */

+ 379 - 0
drivers/edac/amd8131_edac.c

@@ -0,0 +1,379 @@
+/*
+ * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/pci_ids.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "amd8131_edac.h"
+
+#define AMD8131_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define AMD8131_EDAC_MOD_STR	"amd8131_edac"
+
+/* Wrapper functions for accessing PCI configuration space */
+static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
+{
+	int ret;
+
+	ret = pci_read_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8131_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
+{
+	int ret;
+
+	ret = pci_write_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8131_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+static char * const bridge_str[] = {
+	[NORTH_A] = "NORTH A",
+	[NORTH_B] = "NORTH B",
+	[SOUTH_A] = "SOUTH A",
+	[SOUTH_B] = "SOUTH B",
+	[NO_BRIDGE] = "NO BRIDGE",
+};
+
+/* Support up to two AMD8131 chipsets on a platform */
+static struct amd8131_dev_info amd8131_devices[] = {
+	{
+	.inst = NORTH_A,
+	.devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
+	.ctl_name = "AMD8131_PCIX_NORTH_A",
+	},
+	{
+	.inst = NORTH_B,
+	.devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
+	.ctl_name = "AMD8131_PCIX_NORTH_B",
+	},
+	{
+	.inst = SOUTH_A,
+	.devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
+	.ctl_name = "AMD8131_PCIX_SOUTH_A",
+	},
+	{
+	.inst = SOUTH_B,
+	.devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
+	.ctl_name = "AMD8131_PCIX_SOUTH_B",
+	},
+	{.inst = NO_BRIDGE,},
+};
+
+static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
+{
+	u32 val32;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* First clear error detection flags */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_MASK)
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+	/* Clear Discard Timer Timedout flag */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	if (val32 & INT_CTLR_DTS)
+		edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Clear CRC Error flag on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	if (val32 & LNK_CTRL_CRCERR_A)
+		edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Clear CRC Error flag on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	if (val32 & LNK_CTRL_CRCERR_B)
+		edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+
+	/*
+	 * Then enable all error detection.
+	 *
+	 * Setup Discard Timer Sync Flood Enable,
+	 * System Error Enable and Parity Error Enable.
+	 */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
+	edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Enable overall SERR Error detection */
+	edac_pci_read_dword(dev, REG_STS_CMD, &val32);
+	val32 |= STS_CMD_SERREN;
+	edac_pci_write_dword(dev, REG_STS_CMD, val32);
+
+	/* Setup CRC Flood Enable for link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	val32 |= LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Setup CRC Flood Enable for link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	val32 |= LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+}
+
+static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
+{
+	u32 val32;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* Disable SERR, PERR and DTSE Error detection */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
+	edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+	/* Disable overall System Error detection */
+	edac_pci_read_dword(dev, REG_STS_CMD, &val32);
+	val32 &= ~STS_CMD_SERREN;
+	edac_pci_write_dword(dev, REG_STS_CMD, val32);
+
+	/* Disable CRC Sync Flood on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	val32 &= ~LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+	/* Disable CRC Sync Flood on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	val32 &= ~LNK_CTRL_CRCFEN;
+	edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+}
+
+static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
+{
+	struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
+	struct pci_dev *dev = dev_info->dev;
+	u32 val32;
+
+	/* Check PCI-X Bridge Memory Base-Limit Register for errors */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_MASK) {
+		printk(KERN_INFO "Error(s) in mem limit register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
+			"RTA: %d, STA: %d, MDPE: %d\n",
+			val32 & MEM_LIMIT_DPE,
+			val32 & MEM_LIMIT_RSE,
+			val32 & MEM_LIMIT_RMA,
+			val32 & MEM_LIMIT_RTA,
+			val32 & MEM_LIMIT_STA,
+			val32 & MEM_LIMIT_MDPE);
+
+		val32 |= MEM_LIMIT_MASK;
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if Discard Timer timed out */
+	edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
+	if (val32 & INT_CTLR_DTS) {
+		printk(KERN_INFO "Error(s) in interrupt and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
+
+		val32 |= INT_CTLR_DTS;
+		edac_pci_write_dword(dev, REG_INT_CTLR, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if CRC error happens on link side A */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
+	if (val32 & LNK_CTRL_CRCERR_A) {
+		printk(KERN_INFO "Error(s) in link conf and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
+
+		val32 |= LNK_CTRL_CRCERR_A;
+		edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check if CRC error happens on link side B */
+	edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
+	if (val32 & LNK_CTRL_CRCERR_B) {
+		printk(KERN_INFO "Error(s) in link conf and control register "
+			"on %s bridge\n", dev_info->ctl_name);
+		printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
+
+		val32 |= LNK_CTRL_CRCERR_B;
+		edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+}
+
+static struct amd8131_info amd8131_chipset = {
+	.err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
+	.devices = amd8131_devices,
+	.init = amd8131_pcix_init,
+	.exit = amd8131_pcix_exit,
+	.check = amd8131_pcix_check,
+};
+
+/*
+ * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
+ * so the kernel calls amd8131_probe() 4 times, each time with a different
+ * pci_dev pointing at one of the bridges.
+ */
+static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct amd8131_dev_info *dev_info;
+
+	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
+		dev_info++)
+		if (dev_info->devfn == dev->devfn)
+			break;
+
+	if (dev_info->inst == NO_BRIDGE) /* should never happen */
+		return -ENODEV;
+
+	/*
+	 * We can't look the device up with pci_get_device() as we usually
+	 * do, because all 4 bridges share the same device ID; instead we
+	 * take a reference on the pci_dev we were probed with.
+	 */
+	dev_info->dev = pci_dev_get(dev);
+
+	if (pci_enable_device(dev_info->dev)) {
+		pci_dev_put(dev_info->dev);
+		printk(KERN_ERR "failed to enable: "
+			"vendor %x, device %x, devfn %x, name %s\n",
+			PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
+			dev_info->devfn, dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for
+	 * edac_pci_ctl_info; we make use of the existing
+	 * dev_info instead.
+	 */
+	dev_info->edac_idx = edac_pci_alloc_index();
+	dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
+	if (!dev_info->edac_dev)
+		return -ENOMEM;
+
+	dev_info->edac_dev->pvt_info = dev_info;
+	dev_info->edac_dev->dev = &dev_info->dev->dev;
+	dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
+	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+	dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		dev_info->edac_dev->edac_check = amd8131_chipset.check;
+
+	if (amd8131_chipset.init)
+		amd8131_chipset.init(dev_info);
+
+	if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
+		printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
+			dev_info->ctl_name);
+		edac_pci_free_ctl_info(dev_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one device on AMD8131 "
+		"vendor %x, device %x, devfn %x, name %s\n",
+		PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
+		dev_info->devfn, dev_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8131_remove(struct pci_dev *dev)
+{
+	struct amd8131_dev_info *dev_info;
+
+	for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
+		dev_info++)
+		if (dev_info->devfn == dev->devfn)
+			break;
+
+	if (dev_info->inst == NO_BRIDGE) /* should never happen */
+		return;
+
+	if (dev_info->edac_dev) {
+		edac_pci_del_device(dev_info->edac_dev->dev);
+		edac_pci_free_ctl_info(dev_info->edac_dev);
+	}
+
+	if (amd8131_chipset.exit)
+		amd8131_chipset.exit(dev_info);
+
+	pci_dev_put(dev_info->dev);
+}
+
+static const struct pci_device_id amd8131_edac_pci_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8131_BRIDGE),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = 0,
+	},
+	{
+	0,
+	}			/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
+
+static struct pci_driver amd8131_edac_driver = {
+	.name = AMD8131_EDAC_MOD_STR,
+	.probe = amd8131_probe,
+	.remove = amd8131_remove,
+	.id_table = amd8131_edac_pci_tbl,
+};
+
+static int __init amd8131_edac_init(void)
+{
+	printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
+
+	/* Only POLL mode supported so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	return pci_register_driver(&amd8131_edac_driver);
+}
+
+static void __exit amd8131_edac_exit(void)
+{
+	pci_unregister_driver(&amd8131_edac_driver);
+}
+
+module_init(amd8131_edac_init);
+module_exit(amd8131_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
+MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");

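Because all four PCI-X bridges answer to the same vendor/device ID, amd8131_probe() tells them apart purely by devfn, walking the static table until it either finds a match or hits the NO_BRIDGE sentinel. A self-contained userspace sketch of that sentinel-terminated lookup follows; the devfn values mirror the DEVFN_PCIX_BRIDGE_* constants, everything else is illustrative.

#include <stdio.h>

#define NO_BRIDGE	-1

struct bridge {
	int devfn;		/* PCI device/function number */
	const char *name;
};

/* Table terminated by a NO_BRIDGE sentinel, like amd8131_devices[]. */
static struct bridge bridges[] = {
	{ 8,  "NORTH_A" },
	{ 16, "NORTH_B" },
	{ 24, "SOUTH_A" },
	{ 32, "SOUTH_B" },
	{ NO_BRIDGE, NULL },
};

static struct bridge *lookup(int devfn)
{
	struct bridge *b;

	for (b = bridges; b->devfn != NO_BRIDGE; b++)
		if (b->devfn == devfn)
			return b;

	return NULL;	/* not one of ours: decline the probe */
}

int main(void)
{
	struct bridge *b = lookup(24);

	printf("devfn 24 -> %s\n", b ? b->name : "unknown");
	return 0;
}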
+ 119 - 0
drivers/edac/amd8131_edac.h

@@ -0,0 +1,119 @@
+/*
+ * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ * 		Benjamin Walsh <benjamin.walsh@windriver.com>
+ * 		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AMD8131_EDAC_H_
+#define _AMD8131_EDAC_H_
+
+#define DEVFN_PCIX_BRIDGE_NORTH_A	8
+#define DEVFN_PCIX_BRIDGE_NORTH_B	16
+#define DEVFN_PCIX_BRIDGE_SOUTH_A	24
+#define DEVFN_PCIX_BRIDGE_SOUTH_B	32
+
+/************************************************************
+ *	PCI-X Bridge Status and Command Register, DevA:0x04
+ ************************************************************/
+#define REG_STS_CMD	0x04
+enum sts_cmd_bits {
+	STS_CMD_SSE	= BIT(30),
+	STS_CMD_SERREN	= BIT(8)
+};
+
+/************************************************************
+ *	PCI-X Bridge Interrupt and Bridge Control Register, DevA:0x3C
+ ************************************************************/
+#define REG_INT_CTLR	0x3c
+enum int_ctlr_bits {
+	INT_CTLR_DTSE	= BIT(27),
+	INT_CTLR_DTS	= BIT(26),
+	INT_CTLR_SERR	= BIT(17),
+	INT_CTLR_PERR	= BIT(16)
+};
+
+/************************************************************
+ *	PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
+ ************************************************************/
+#define REG_MEM_LIM	0x1c
+enum mem_limit_bits {
+	MEM_LIMIT_DPE 	= BIT(31),
+	MEM_LIMIT_RSE 	= BIT(30),
+	MEM_LIMIT_RMA 	= BIT(29),
+	MEM_LIMIT_RTA 	= BIT(28),
+	MEM_LIMIT_STA	= BIT(27),
+	MEM_LIMIT_MDPE	= BIT(24),
+	MEM_LIMIT_MASK	= MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
+				MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
+};
+
+/************************************************************
+ *	Link Configuration And Control Register, side A
+ ************************************************************/
+#define REG_LNK_CTRL_A	0xc4
+
+/************************************************************
+ *	Link Configuration And Control Register, side B
+ ************************************************************/
+#define REG_LNK_CTRL_B  0xc8
+
+enum lnk_ctrl_bits {
+	LNK_CTRL_CRCERR_A	= BIT(9),
+	LNK_CTRL_CRCERR_B	= BIT(8),
+	LNK_CTRL_CRCFEN		= BIT(1)
+};
+
+enum pcix_bridge_inst {
+	NORTH_A = 0,
+	NORTH_B = 1,
+	SOUTH_A = 2,
+	SOUTH_B = 3,
+	NO_BRIDGE = 4
+};
+
+struct amd8131_dev_info {
+	int devfn;
+	enum pcix_bridge_inst inst;
+	struct pci_dev *dev;
+	int edac_idx;	/* pci device index */
+	char *ctl_name;
+	struct edac_pci_ctl_info *edac_dev;
+};
+
+/*
+ * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
+ * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
+ * four PCIX Bridges on ATCA-6101 altogether.
+ *
+ * These PCIX Bridges share the same PCI Device ID and are all of
+ * Function Zero; they can be discriminated by their pci_dev->devfn.
+ * They share the same set of init/check/exit methods, and their
+ * private structures are collected in the devices[] array.
+ */
+struct amd8131_info {
+	u16 err_dev;	/* PCI Device ID for AMD8131 APIC*/
+	struct amd8131_dev_info *devices;
+	void (*init)(struct amd8131_dev_info *dev_info);
+	void (*exit)(struct amd8131_dev_info *dev_info);
+	void (*check)(struct edac_pci_ctl_info *edac_dev);
+};
+
+#endif /* _AMD8131_EDAC_H_ */
+

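The register layout above is expressed entirely through BIT() masks, and amd8131_pcix_check() clears a latched flag by writing the set bit back to the register. Below is a small userspace sketch of that read, test and write-back sequence, assuming write-1-to-clear semantics for a simulated status word; the register and flag names are invented for illustration.

#include <stdio.h>

#define BIT(n)		(1u << (n))

#define STATUS_CRC_ERR	BIT(9)		/* invented example flag */
#define STATUS_TIMEOUT	BIT(26)		/* invented example flag */

/* Simulated status register with write-1-to-clear semantics. */
static unsigned int status_reg = STATUS_CRC_ERR | STATUS_TIMEOUT;

static unsigned int reg_read(void)	{ return status_reg; }
static void reg_write(unsigned int v)	{ status_reg &= ~v; }

int main(void)
{
	unsigned int val = reg_read();

	if (val & STATUS_CRC_ERR) {
		printf("CRC error latched, clearing it\n");
		reg_write(val & STATUS_CRC_ERR);	/* write the 1 back */
	}
	printf("status after clear: 0x%08x\n", reg_read());
	return 0;
}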
+ 15 - 1
drivers/edac/edac_core.h

@@ -49,6 +49,10 @@
 #define edac_printk(level, prefix, fmt, arg...) \
 	printk(level "EDAC " prefix ": " fmt, ##arg)
 
+#define edac_printk_verbose(level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt,	\
+	       __FILE__, __LINE__, ##arg)
+
 #define edac_mc_printk(mci, level, fmt, arg...) \
 	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
 
@@ -71,11 +75,20 @@
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
 
+#ifndef CONFIG_EDAC_DEBUG_VERBOSE
 #define edac_debug_printk(level, fmt, arg...)                            \
 	do {                                                             \
 		if (level <= edac_debug_level)                           \
 			edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
-	} while(0)
+	} while (0)
+#else  /* CONFIG_EDAC_DEBUG_VERBOSE */
+#define edac_debug_printk(level, fmt, arg...)                            \
+	do {                                                             \
+		if (level <= edac_debug_level)                           \
+			edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \
+					    ##arg);			\
+	} while (0)
+#endif
 
 #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
 #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
@@ -831,6 +844,7 @@ extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
 extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
 				unsigned long value);
 
+extern int edac_pci_alloc_index(void);
 extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
 extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
 

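The verbose variant only changes what a debug line looks like by prepending __FILE__ and __LINE__; the level gating itself stays in edac_debug_printk(). A userspace analogue of that compile-time selection, using the same GNU named-variadic macro style as the kernel macros, might look like this; the macro and variable names are illustrative.

#include <stdio.h>

static int debug_level = 1;

#ifdef VERBOSE_DEBUG
#define debug_printk(level, fmt, arg...)				\
	do {								\
		if ((level) <= debug_level)				\
			printf("DEBUG %s:%d: " fmt,			\
			       __FILE__, __LINE__, ##arg);		\
	} while (0)
#else
#define debug_printk(level, fmt, arg...)				\
	do {								\
		if ((level) <= debug_level)				\
			printf("DEBUG: " fmt, ##arg);			\
	} while (0)
#endif

int main(void)
{
	debug_printk(0, "always shown at debug_level %d\n", debug_level);
	debug_printk(3, "only shown once debug_level reaches 3\n");
	return 0;
}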
+ 14 - 0
drivers/edac/edac_pci.c

@@ -30,6 +30,7 @@
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
 static LIST_HEAD(edac_pci_list);
+static atomic_t pci_indexes = ATOMIC_INIT(0);
 
 /*
  * edac_pci_alloc_ctl_info
@@ -317,6 +318,19 @@ void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
 }
 EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
 
+/*
+ * edac_pci_alloc_index: Allocate a unique PCI index number
+ *
+ * Return:
+ *      allocated index number
+ *
+ */
+int edac_pci_alloc_index(void)
+{
+	return atomic_inc_return(&pci_indexes) - 1;
+}
+EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
+
 /*
  * edac_pci_add_device: Insert the 'edac_dev' structure into the
  * edac_pci global list and create sysfs entries associated with

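edac_pci_alloc_index() hands out 0, 1, 2, ... by doing atomic_inc_return() on a shared counter and subtracting one, so concurrent probes never receive the same index. A userspace equivalent using C11 atomics, where atomic_fetch_add() returns the previous value directly, might look like this sketch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int next_index;

/* Hand out a unique, monotonically increasing index: 0, 1, 2, ... */
static int alloc_index(void)
{
	return atomic_fetch_add(&next_index, 1);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("allocated index %d\n", alloc_index());
	return 0;
}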
+ 1448 - 0
drivers/edac/ppc4xx_edac.c

@@ -0,0 +1,1448 @@
+/*
+ * Copyright (c) 2008 Nuovation System Designs, LLC
+ *   Grant Erickson <gerickson@nuovations.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+
+#include <asm/dcr.h>
+
+#include "edac_core.h"
+#include "ppc4xx_edac.h"
+
+/*
+ * This file implements a driver for monitoring and handling events
+ * associated with the IBM DDR2 ECC controller found in the AMCC/IBM
+ * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
+ *
+ * As realized in the 405EX[r], this controller features:
+ *
+ *   - Support for registered- and non-registered DDR1 and DDR2 memory.
+ *   - 32-bit or 16-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 4-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Two (2) memory banks/ranks.
+ *   - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
+ *     bank/rank in 16-bit mode.
+ *
+ * As realized in the 440SP and 440SPe, this controller changes/adds:
+ *
+ *   - 64-bit or 32-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 8-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
+ *     per bank/rank in 32-bit mode.
+ *
+ * As realized in the 460EX and 460GT, this controller changes/adds:
+ *
+ *   - 64-bit or 32-bit memory interface with optional ECC.
+ *
+ *     o ECC support includes:
+ *
+ *       - 8-bit SEC/DED
+ *       - Aligned-nibble error detect
+ *       - Bypass mode
+ *
+ *   - Four (4) memory banks/ranks.
+ *   - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
+ *     per bank/rank in 32-bit mode.
+ *
+ * At present, this driver has ONLY been tested against the controller
+ * realization in the 405EX[r] on the AMCC Kilauea and Haleakala
+ * boards (256 MiB w/o ECC memory soldered onto the board) and a
+ * proprietary board based on those designs (128 MiB ECC memory, also
+ * soldered onto the board).
+ *
+ * Dynamic feature detection and handling needs to be added for the
+ * other realizations of this controller listed above.
+ *
+ * Eventually, this driver will likely be adapted to the above variant
+ * realizations of this controller as well as broken apart to handle
+ * the other known ECC-capable controllers prevalent in other 4xx
+ * processors:
+ *
+ *   - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
+ *   - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
+ *   - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
+ *
+ * For this controller, unfortunately, correctable errors report
+ * nothing more than the beat/cycle and byte/lane the correction
+ * occurred on and the check bit group that covered the error.
+ *
+ * In contrast, uncorrectable errors also report the failing address,
+ * the bus master and the transaction direction (i.e. read or write).
+ *
+ * Regardless of whether the error is a CE or a UE, we report the
+ * following pieces of information in the driver-unique message to the
+ * EDAC subsystem:
+ *
+ *   - Device tree path
+ *   - Bank(s)
+ *   - Check bit error group
+ *   - Beat(s)/lane(s)
+ */
+
+/* Preprocessor Definitions */
+
+#define EDAC_OPSTATE_INT_STR		"interrupt"
+#define EDAC_OPSTATE_POLL_STR		"polled"
+#define EDAC_OPSTATE_UNKNOWN_STR	"unknown"
+
+#define PPC4XX_EDAC_MODULE_NAME		"ppc4xx_edac"
+#define PPC4XX_EDAC_MODULE_REVISION	"v1.0.0 " __DATE__
+
+#define PPC4XX_EDAC_MESSAGE_SIZE	256
+
+/*
+ * Kernel logging without an EDAC instance
+ */
+#define ppc4xx_edac_printk(level, fmt, arg...) \
+	edac_printk(level, "PPC4xx MC", fmt, ##arg)
+
+/*
+ * Kernel logging with an EDAC instance
+ */
+#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
+
+/*
+ * Macros to convert bank configuration size enumerations into MiB and
+ * page values.
+ */
+#define SDRAM_MBCF_SZ_MiB_MIN		4
+#define SDRAM_MBCF_SZ_TO_MiB(n)		(SDRAM_MBCF_SZ_MiB_MIN \
+					 << (SDRAM_MBCF_SZ_DECODE(n)))
+#define SDRAM_MBCF_SZ_TO_PAGES(n)	(SDRAM_MBCF_SZ_MiB_MIN \
+					 << (20 - PAGE_SHIFT + \
+					     SDRAM_MBCF_SZ_DECODE(n)))
+
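/*
 * Worked example of the two macros above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and a bank whose size field decodes to 5:
 *
 *   SDRAM_MBCF_SZ_TO_MiB(n)   = 4 << 5              = 128 MiB
 *   SDRAM_MBCF_SZ_TO_PAGES(n) = 4 << (20 - 12 + 5)  = 32768 pages
 *
 * and 32768 pages of 4 KiB each is again 128 MiB, as expected.
 */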
+/*
+ * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
+ * indirectly accessed and have a base and length defined by the
+ * device tree. The base can be anything; however, we expect the
+ * length to be precisely two registers, the first for the address
+ * window and the second for the data window.
+ */
+#define SDRAM_DCR_RESOURCE_LEN		2
+#define SDRAM_DCR_ADDR_OFFSET		0
+#define SDRAM_DCR_DATA_OFFSET		1
+
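/*
 * The indirect access is therefore always a two-step sequence: write
 * the target register number into the address window, then read or
 * write the data window.  Roughly, with write_dcr()/read_dcr() standing
 * in for whatever primitive actually touches a DCR:
 *
 *   write_dcr(base + SDRAM_DCR_ADDR_OFFSET, idcr_n);
 *   val = read_dcr(base + SDRAM_DCR_DATA_OFFSET);
 *
 * which is what the mfsdram()/mtsdram() helpers below wrap up.
 */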
+/*
+ * Device tree interrupt indices
+ */
+#define INTMAP_ECCDED_INDEX		0	/* Double-bit Error Detect */
+#define INTMAP_ECCSEC_INDEX		1	/* Single-bit Error Correct */
+
+/* Type Definitions */
+
+/*
+ * PPC4xx SDRAM memory controller private instance data
+ */
+struct ppc4xx_edac_pdata {
+	dcr_host_t dcr_host;	/* Indirect DCR address/data window mapping */
+	struct {
+		int sec;	/* Single-bit correctable error IRQ assigned */
+		int ded;	/* Double-bit detectable error IRQ assigned */
+	} irqs;
+};
+
+/*
+ * Various status data gathered and manipulated when checking and
+ * reporting ECC status.
+ */
+struct ppc4xx_ecc_status {
+	u32 ecces;
+	u32 besr;
+	u32 bearh;
+	u32 bearl;
+	u32 wmirq;
+};
+
+/* Function Prototypes */
+
+static int ppc4xx_edac_probe(struct of_device *device,
+			     const struct of_device_id *device_id);
+static int ppc4xx_edac_remove(struct of_device *device);
+
+/* Global Variables */
+
+/*
+ * Device tree node type and compatible tuples this driver can match
+ * on.
+ */
+static struct of_device_id ppc4xx_edac_match[] = {
+	{
+		.compatible	= "ibm,sdram-4xx-ddr2"
+	},
+	{ }
+};
+
+static struct of_platform_driver ppc4xx_edac_driver = {
+	.match_table		= ppc4xx_edac_match,
+	.probe			= ppc4xx_edac_probe,
+	.remove			= ppc4xx_edac_remove,
+	.driver			= {
+		.owner	= THIS_MODULE,
+		.name	= PPC4XX_EDAC_MODULE_NAME
+	}
+};
+
+/*
+ * TODO: The row and channel parameters likely need to be dynamically
+ * set based on the aforementioned variant controller realizations.
+ */
+static const unsigned ppc4xx_edac_nr_csrows = 2;
+static const unsigned ppc4xx_edac_nr_chans = 1;
+
+/*
+ * Strings associated with PLB master IDs capable of being posted in
+ * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
+ */
+static const char * const ppc4xx_plb_masters[9] = {
+	[SDRAM_PLB_M0ID_ICU]	= "ICU",
+	[SDRAM_PLB_M0ID_PCIE0]	= "PCI-E 0",
+	[SDRAM_PLB_M0ID_PCIE1]	= "PCI-E 1",
+	[SDRAM_PLB_M0ID_DMA]	= "DMA",
+	[SDRAM_PLB_M0ID_DCU]	= "DCU",
+	[SDRAM_PLB_M0ID_OPB]	= "OPB",
+	[SDRAM_PLB_M0ID_MAL]	= "MAL",
+	[SDRAM_PLB_M0ID_SEC]	= "SEC",
+	[SDRAM_PLB_M0ID_AHB]	= "AHB"
+};
+
+/**
+ * mfsdram - read and return controller register data
+ * @dcr_host: A pointer to the DCR mapping.
+ * @idcr_n: The indirect DCR register to read.
+ *
+ * This routine reads and returns the data associated with the
+ * controller's specified indirect DCR register.
+ *
+ * Returns the read data.
+ */
+static inline u32
+mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
+{
+	return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
+			dcr_host->base + SDRAM_DCR_DATA_OFFSET,
+			idcr_n);
+}
+
+/**
+ * mtsdram - write controller register data
+ * @dcr_host: A pointer to the DCR mapping.
+ * @idcr_n: The indirect DCR register to write.
+ * @value: The data to write.
+ *
+ * This routine writes the provided data to the controller's specified
+ * indirect DCR register.
+ */
+static inline void
+mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
+{
+	return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
+			dcr_host->base + SDRAM_DCR_DATA_OFFSET,
+			idcr_n,
+			value);
+}
+
+/**
+ * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
+ * @status: A pointer to the ECC status structure to check for an
+ *          ECC bank error.
+ * @bank: The bank to check for an ECC error.
+ *
+ * This routine determines whether the specified bank has an ECC
+ * error.
+ *
+ * Returns true if the specified bank has an ECC error; otherwise,
+ * false.
+ */
+static bool
+ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
+			     unsigned int bank)
+{
+	switch (bank) {
+	case 0:
+		return status->ecces & SDRAM_ECCES_BK0ER;
+	case 1:
+		return status->ecces & SDRAM_ECCES_BK1ER;
+	default:
+		return false;
+	}
+}
+
+/**
+ * ppc4xx_edac_generate_bank_message - generate interpreted bank status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the bank message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[BKNER]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
+				  const struct ppc4xx_ecc_status *status,
+				  char *buffer,
+				  size_t size)
+{
+	int n, total = 0;
+	unsigned int row, rows;
+
+	n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
+		if (ppc4xx_edac_check_bank_error(status, row)) {
+			n = snprintf(buffer, size, "%s%u",
+					(rows++ ? ", " : ""), row);
+
+			if (n < 0 || n >= size)
+				goto fail;
+
+			buffer += n;
+			size -= n;
+			total += n;
+		}
+	}
+
+	n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the checkbit message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[CKBER]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
+				      const struct ppc4xx_ecc_status *status,
+				      char *buffer,
+				      size_t size)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const char *ckber = NULL;
+
+	switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
+	case SDRAM_ECCES_CKBER_NONE:
+		ckber = "None";
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_0_3:
+		ckber = "ECC0:3";
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_4_8:
+		switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
+			SDRAM_MCOPT1_WDTH_MASK) {
+		case SDRAM_MCOPT1_WDTH_16:
+			ckber = "ECC0:3";
+			break;
+		case SDRAM_MCOPT1_WDTH_32:
+			ckber = "ECC4:8";
+			break;
+		default:
+			ckber = "Unknown";
+			break;
+		}
+		break;
+	case SDRAM_ECCES_CKBER_32_ECC_0_8:
+		ckber = "ECC0:8";
+		break;
+	default:
+		ckber = "Unknown";
+		break;
+	}
+
+	return snprintf(buffer, size, "Checkbit Error: %s", ckber);
+}
+
+/**
+ * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the byte lane message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES[BNCE]
+ * field of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
+				  const struct ppc4xx_ecc_status *status,
+				  char *buffer,
+				  size_t size)
+{
+	int n, total = 0;
+	unsigned int lane, lanes;
+	const unsigned int first_lane = 0;
+	const unsigned int lane_count = 16;
+
+	n = snprintf(buffer, size, "; Byte Lane Errors: ");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
+		if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
+			n = snprintf(buffer, size,
+				     "%s%u",
+				     (lanes++ ? ", " : ""), lane);
+
+			if (n < 0 || n >= size)
+				goto fail;
+
+			buffer += n;
+			size -= n;
+			total += n;
+		}
+	}
+
+	n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the ECCES message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the portion of the
+ * driver-unique report message associated with the ECCES register of
+ * the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
+				 const struct ppc4xx_ecc_status *status,
+				 char *buffer,
+				 size_t size)
+{
+	int n, total = 0;
+
+	n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+	n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		goto fail;
+
+	buffer += n;
+	size -= n;
+	total += n;
+
+ fail:
+	return total;
+}
+
+/**
+ * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the PLB message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the portion of the
+ * driver-unique report message associated with the PLB-related BESR
+ * and/or WMIRQ registers of the specified ECC status.
+ *
+ * Returns the number of characters generated on success; otherwise, <
+ * 0 on error.
+ */
+static int
+ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
+				 const struct ppc4xx_ecc_status *status,
+				 char *buffer,
+				 size_t size)
+{
+	unsigned int master;
+	bool read;
+
+	if ((status->besr & SDRAM_BESR_MASK) == 0)
+		return 0;
+
+	if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
+		return 0;
+
+	read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
+
+	master = SDRAM_BESR_M0ID_DECODE(status->besr);
+
+	return snprintf(buffer, size,
+			"%s error w/ PLB master %u \"%s\"; ",
+			(read ? "Read" : "Write"),
+			master,
+			(((master >= SDRAM_PLB_M0ID_FIRST) &&
+			  (master <= SDRAM_PLB_M0ID_LAST)) ?
+			 ppc4xx_plb_masters[master] : "UNKNOWN"));
+}
+
+/**
+ * ppc4xx_edac_generate_message - generate interpreted status message
+ * @mci: A pointer to the EDAC memory controller instance associated
+ *       with the driver-unique message being generated.
+ * @status: A pointer to the ECC status structure to generate the
+ *          message from.
+ * @buffer: A pointer to the buffer in which to generate the
+ *          message.
+ * @size: The size, in bytes, of space available in buffer.
+ *
+ * This routine writes into the provided buffer the driver-unique
+ * EDAC report message from the specified ECC status.
+ */
+static void
+ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
+			     const struct ppc4xx_ecc_status *status,
+			     char *buffer,
+			     size_t size)
+{
+	int n;
+
+	if (buffer == NULL || size == 0)
+		return;
+
+	n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
+
+	if (n < 0 || n >= size)
+		return;
+
+	buffer += n;
+	size -= n;
+
+	ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
+}
+
+#ifdef DEBUG
+/**
+ * ppc4xx_ecc_dump_status - dump controller ECC status registers
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being dumped.
+ * @status: A pointer to the ECC status structure to generate the
+ *          dump from.
+ *
+ * This routine dumps the raw and interpreted form of the specified
+ * ECC status to the kernel log buffer.
+ */
+static void
+ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
+		       const struct ppc4xx_ecc_status *status)
+{
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	ppc4xx_edac_mc_printk(KERN_INFO, mci,
+			      "\n"
+			      "\tECCES: 0x%08x\n"
+			      "\tWMIRQ: 0x%08x\n"
+			      "\tBESR:  0x%08x\n"
+			      "\tBEAR:  0x%08x%08x\n"
+			      "\t%s\n",
+			      status->ecces,
+			      status->wmirq,
+			      status->besr,
+			      status->bearh,
+			      status->bearl,
+			      message);
+}
+#endif /* DEBUG */
+
+/**
+ * ppc4xx_ecc_get_status - get controller ECC status
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being retrieved.
+ * @status: A pointer to the ECC status structure to populate the
+ *          ECC status with.
+ *
+ * This routine reads and masks, as appropriate, all the relevant
+ * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
+ * While we read all of them, for correctable errors, we only expect
+ * to deal with ECCES. For uncorrectable errors, we expect to deal
+ * with all of them.
+ */
+static void
+ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
+		      struct ppc4xx_ecc_status *status)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const dcr_host_t *dcr_host = &pdata->dcr_host;
+
+	status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
+	status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
+	status->besr  = mfsdram(dcr_host, SDRAM_BESR)  & SDRAM_BESR_MASK;
+	status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
+	status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
+}
+
+/**
+ * ppc4xx_ecc_clear_status - clear controller ECC status
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the status being cleared.
+ * @status: A pointer to the ECC status structure containing the
+ *          values to write to clear the ECC status.
+ *
+ * This routine clears the status registers that deal with
+ * ibm,sdram-4xx-ddr2 ECC errors by writing the masked (as
+ * appropriate) status values back to them.
+ */
+static void
+ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
+			const struct ppc4xx_ecc_status *status)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	const dcr_host_t *dcr_host = &pdata->dcr_host;
+
+	mtsdram(dcr_host, SDRAM_ECCES,	status->ecces & SDRAM_ECCES_MASK);
+	mtsdram(dcr_host, SDRAM_WMIRQ,	status->wmirq & SDRAM_WMIRQ_MASK);
+	mtsdram(dcr_host, SDRAM_BESR,	status->besr & SDRAM_BESR_MASK);
+	mtsdram(dcr_host, SDRAM_BEARL,	0);
+	mtsdram(dcr_host, SDRAM_BEARH,	0);
+}
+
+/**
+ * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the correctable error being handled and reported.
+ * @status: A pointer to the ECC status structure associated with
+ *          the correctable error being handled and reported.
+ *
+ * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
+ * correctable error. Per the aforementioned discussion, there's not
+ * enough status available to use the full EDAC correctable error
+ * interface, so we just pass driver-unique message to the "no info"
+ * interface.
+ */
+static void
+ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
+		      const struct ppc4xx_ecc_status *status)
+{
+	int row;
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	for (row = 0; row < mci->nr_csrows; row++)
+		if (ppc4xx_edac_check_bank_error(status, row))
+			edac_mc_handle_ce_no_info(mci, message);
+}
+
+/**
+ * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the uncorrectable error being handled and
+ *       reported.
+ * @status: A pointer to the ECC status structure associated with
+ *          the uncorrectable error being handled and reported.
+ *
+ * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
+ * uncorrectable error.
+ */
+static void
+ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
+		      const struct ppc4xx_ecc_status *status)
+{
+	const u64 bear = ((u64)status->bearh << 32 | status->bearl);
+	const unsigned long page = bear >> PAGE_SHIFT;
+	const unsigned long offset = bear & ~PAGE_MASK;
+	int row;
+	char message[PPC4XX_EDAC_MESSAGE_SIZE];
+
+	ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
+
+	for (row = 0; row < mci->nr_csrows; row++)
+		if (ppc4xx_edac_check_bank_error(status, row))
+			edac_mc_handle_ue(mci, page, offset, row, message);
+}
+
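/*
 * For example, assuming 4 KiB pages, a failing address (BEAR) of
 * 0x123456789 splits into page 0x123456 (bear >> 12) and offset
 * 0x789 (bear & ~PAGE_MASK, i.e. bear & 0xfff), which is what gets
 * reported to edac_mc_handle_ue() above.
 */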
+/**
+ * ppc4xx_edac_check - check controller for ECC errors
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller being
+ *       checked.
+ *
+ * This routine is used to check and post ECC errors and is called by
+ * both the EDAC polling thread and this driver's CE and UE interrupt
+ * handler.
+ */
+static void
+ppc4xx_edac_check(struct mem_ctl_info *mci)
+{
+#ifdef DEBUG
+	static unsigned int count;
+#endif
+	struct ppc4xx_ecc_status status;
+
+	ppc4xx_ecc_get_status(mci, &status);
+
+#ifdef DEBUG
+	if (count++ % 30 == 0)
+		ppc4xx_ecc_dump_status(mci, &status);
+#endif
+
+	if (status.ecces & SDRAM_ECCES_UE)
+		ppc4xx_edac_handle_ue(mci, &status);
+
+	if (status.ecces & SDRAM_ECCES_CE)
+		ppc4xx_edac_handle_ce(mci, &status);
+
+	ppc4xx_ecc_clear_status(mci, &status);
+}
+
+/**
+ * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
+ * @irq:    The virtual interrupt number being serviced.
+ * @dev_id: A pointer to the EDAC memory controller instance
+ *          associated with the interrupt being handled.
+ *
+ * This routine implements the interrupt handler for both correctable
+ * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
+ * controller. It simply calls through to the same routine used during
+ * polling to check, report and clear the ECC status.
+ *
+ * Unconditionally returns IRQ_HANDLED.
+ */
+static irqreturn_t
+ppc4xx_edac_isr(int irq, void *dev_id)
+{
+	struct mem_ctl_info *mci = dev_id;
+
+	ppc4xx_edac_check(mci);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ppc4xx_edac_get_dtype - return the controller memory width
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which the width
+ *          is derived.
+ *
+ * This routine returns the EDAC device type width appropriate for the
+ * current controller configuration.
+ *
+ * TODO: This needs to be conditioned dynamically through feature
+ * flags or some such when other controller variants are supported as
+ * the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the
+ * 16- and 64-bit field definition/value/enumeration (b1) overloaded
+ * among them.
+ *
+ * Returns a device type width enumeration.
+ */
+static enum dev_type __devinit
+ppc4xx_edac_get_dtype(u32 mcopt1)
+{
+	switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
+	case SDRAM_MCOPT1_WDTH_16:
+		return DEV_X2;
+	case SDRAM_MCOPT1_WDTH_32:
+		return DEV_X4;
+	default:
+		return DEV_UNKNOWN;
+	}
+}
+
+/**
+ * ppc4xx_edac_get_mtype - return controller memory type
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which the memory type
+ *          is derived.
+ *
+ * This routine returns the EDAC memory type appropriate for the
+ * current controller configuration.
+ *
+ * Returns a memory type enumeration.
+ */
+static enum mem_type __devinit
+ppc4xx_edac_get_mtype(u32 mcopt1)
+{
+	bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
+
+	switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
+	case SDRAM_MCOPT1_DDR2_TYPE:
+		return rden ? MEM_RDDR2 : MEM_DDR2;
+	case SDRAM_MCOPT1_DDR1_TYPE:
+		return rden ? MEM_RDDR : MEM_DDR;
+	default:
+		return MEM_UNKNOWN;
+	}
+}
+
+/**
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller for which
+ *       the csrows (i.e. banks/ranks) are being initialized.
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which bank width
+ *          and memory type information is derived.
+ *
+ * This routine initializes the virtual "chip select rows" associated
+ * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
+ * controller bank/rank is mapped to a row.
+ *
+ * Returns 0 if OK; otherwise, -EINVAL if the memory bank size
+ * configuration cannot be determined.
+ */
+static int __devinit
+ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+{
+	const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	int status = 0;
+	enum mem_type mtype;
+	enum dev_type dtype;
+	enum edac_type edac_mode;
+	int row;
+	u32 mbxcf, size;
+	static u32 ppc4xx_last_page;
+
+	/* Establish the memory type and width */
+
+	mtype = ppc4xx_edac_get_mtype(mcopt1);
+	dtype = ppc4xx_edac_get_dtype(mcopt1);
+
+	/* Establish EDAC mode */
+
+	if (mci->edac_cap & EDAC_FLAG_SECDED)
+		edac_mode = EDAC_SECDED;
+	else if (mci->edac_cap & EDAC_FLAG_EC)
+		edac_mode = EDAC_EC;
+	else
+		edac_mode = EDAC_NONE;
+
+	/*
+	 * Initialize each chip select row structure, each of which
+	 * corresponds 1:1 to a controller bank/rank.
+	 */
+
+	for (row = 0; row < mci->nr_csrows; row++) {
+		struct csrow_info *csi = &mci->csrows[row];
+
+		/*
+		 * Get the configuration settings for this
+		 * row/bank/rank and skip disabled banks.
+		 */
+
+		mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
+
+		if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
+			continue;
+
+		/* Map the bank configuration size setting to pages. */
+
+		size = mbxcf & SDRAM_MBCF_SZ_MASK;
+
+		switch (size) {
+		case SDRAM_MBCF_SZ_4MB:
+		case SDRAM_MBCF_SZ_8MB:
+		case SDRAM_MBCF_SZ_16MB:
+		case SDRAM_MBCF_SZ_32MB:
+		case SDRAM_MBCF_SZ_64MB:
+		case SDRAM_MBCF_SZ_128MB:
+		case SDRAM_MBCF_SZ_256MB:
+		case SDRAM_MBCF_SZ_512MB:
+		case SDRAM_MBCF_SZ_1GB:
+		case SDRAM_MBCF_SZ_2GB:
+		case SDRAM_MBCF_SZ_4GB:
+		case SDRAM_MBCF_SZ_8GB:
+			csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+			break;
+		default:
+			ppc4xx_edac_mc_printk(KERN_ERR, mci,
+					      "Unrecognized memory bank %d "
+					      "size 0x%08x\n",
+					      row, SDRAM_MBCF_SZ_DECODE(size));
+			status = -EINVAL;
+			goto done;
+		}
+
+		csi->first_page = ppc4xx_last_page;
+		csi->last_page	= csi->first_page + csi->nr_pages - 1;
+		csi->page_mask	= 0;
+
+		/*
+		 * It's unclear exactly what grain should be set to
+		 * here. The SDRAM_ECCES register allows resolution of
+		 * an error down to a nibble which would potentially
+		 * argue for a grain of '1' byte, even though we only
+		 * know the associated address for uncorrectable
+		 * errors. This value is not used at present for
+		 * anything other than error reporting so getting it
+		 * wrong should be of little consequence. Other
+		 * possible values would be the PLB width (16), the
+		 * page size (PAGE_SIZE) or the memory width (2 or 4).
+		 */
+
+		csi->grain	= 1;
+
+		csi->mtype	= mtype;
+		csi->dtype	= dtype;
+
+		csi->edac_mode	= edac_mode;
+
+		ppc4xx_last_page += csi->nr_pages;
+	}
+
+ done:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_mc_init - initialize driver instance
+ * @mci: A pointer to the EDAC memory controller instance being
+ *       initialized.
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is bound to.
+ * @match: A pointer to the OpenFirmware device tree match
+ *         information associated with the controller this EDAC instance
+ *         is bound to.
+ * @dcr_host: A pointer to the DCR data containing the DCR mapping
+ *            for this controller instance.
+ * @mcopt1: The 32-bit Memory Controller Option 1 register value
+ *          currently set for the controller, from which ECC capabilities
+ *          and scrub mode are derived.
+ *
+ * This routine performs initialization of the EDAC memory controller
+ * instance and related driver-private data associated with the
+ * ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
+ *
+ * Returns 0 if OK; otherwise, < 0 on error.
+ */
+static int __devinit
+ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
+		    struct of_device *op,
+		    const struct of_device_id *match,
+		    const dcr_host_t *dcr_host,
+		    u32 mcopt1)
+{
+	int status = 0;
+	const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
+	struct ppc4xx_edac_pdata *pdata = NULL;
+	const struct device_node *np = op->node;
+
+	if (match == NULL)
+		return -EINVAL;
+
+	/* Initial driver pointers and private data */
+
+	mci->dev		= &op->dev;
+
+	dev_set_drvdata(mci->dev, mci);
+
+	pdata			= mci->pvt_info;
+
+	pdata->dcr_host		= *dcr_host;
+	pdata->irqs.sec		= NO_IRQ;
+	pdata->irqs.ded		= NO_IRQ;
+
+	/* Initialize controller capabilities and configuration */
+
+	mci->mtype_cap		= (MEM_FLAG_DDR | MEM_FLAG_RDDR |
+				   MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
+
+	mci->edac_ctl_cap	= (EDAC_FLAG_NONE |
+				   EDAC_FLAG_EC |
+				   EDAC_FLAG_SECDED);
+
+	mci->scrub_cap		= SCRUB_NONE;
+	mci->scrub_mode		= SCRUB_NONE;
+
+	/*
+	 * Update the actual capabilities based on the MCOPT1[MCHK]
+	 * settings. Scrubbing is only useful if reporting is enabled.
+	 */
+
+	switch (memcheck) {
+	case SDRAM_MCOPT1_MCHK_CHK:
+		mci->edac_cap	= EDAC_FLAG_EC;
+		break;
+	case SDRAM_MCOPT1_MCHK_CHK_REP:
+		mci->edac_cap	= (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
+		mci->scrub_mode	= SCRUB_SW_SRC;
+		break;
+	default:
+		mci->edac_cap	= EDAC_FLAG_NONE;
+		break;
+	}
+
+	/* Initialize strings */
+
+	mci->mod_name		= PPC4XX_EDAC_MODULE_NAME;
+	mci->mod_ver		= PPC4XX_EDAC_MODULE_REVISION;
+	mci->ctl_name		= match->compatible;
+	mci->dev_name		= np->full_name;
+
+	/* Initialize callbacks */
+
+	mci->edac_check		= ppc4xx_edac_check;
+	mci->ctl_page_to_phys	= NULL;
+
+	/* Initialize chip select rows */
+
+	status = ppc4xx_edac_init_csrows(mci, mcopt1);
+
+	if (status)
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to initialize rows!\n");
+
+	return status;
+}
+
+/**
+ * ppc4xx_edac_register_irq - setup and register controller interrupts
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is bound to.
+ * @mci: A pointer to the EDAC memory controller instance
+ *       associated with the ibm,sdram-4xx-ddr2 controller for which
+ *       interrupts are being registered.
+ *
+ * This routine parses the correctable (CE) and uncorrectable error (UE)
+ * interrupts from the device tree node and maps and assigns them to
+ * the associated EDAC memory controller instance.
+ *
+ * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
+ * mapped and assigned.
+ */
+static int __devinit
+ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
+{
+	int status = 0;
+	int ded_irq, sec_irq;
+	struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+	struct device_node *np = op->node;
+
+	ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
+	sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
+
+	if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to map interrupts.\n");
+		status = -ENODEV;
+		goto fail;
+	}
+
+	status = request_irq(ded_irq,
+			     ppc4xx_edac_isr,
+			     IRQF_DISABLED,
+			     "[EDAC] MC ECCDED",
+			     mci);
+
+	if (status < 0) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to request irq %d for ECC DED",
+				      ded_irq);
+		status = -ENODEV;
+		goto fail1;
+	}
+
+	status = request_irq(sec_irq,
+			     ppc4xx_edac_isr,
+			     IRQF_DISABLED,
+			     "[EDAC] MC ECCSEC",
+			     mci);
+
+	if (status < 0) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Unable to request irq %d for ECC SEC",
+				      sec_irq);
+		status = -ENODEV;
+		goto fail2;
+	}
+
+	ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
+	ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
+
+	pdata->irqs.ded = ded_irq;
+	pdata->irqs.sec = sec_irq;
+
+	return 0;
+
+ fail2:
+	free_irq(sec_irq, mci);
+
+ fail1:
+	free_irq(ded_irq, mci);
+
+ fail:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_map_dcrs - locate and map controller registers
+ * @np: A pointer to the device tree node containing the DCR
+ *      resources to map.
+ * @dcr_host: A pointer to the DCR data to populate with the
+ *            DCR mapping.
+ *
+ * This routine attempts to locate in the device tree and map the DCR
+ * register resources associated with the controller's indirect DCR
+ * address and data windows.
+ *
+ * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
+ * error.
+ */
+static int __devinit
+ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
+{
+	unsigned int dcr_base, dcr_len;
+
+	if (np == NULL || dcr_host == NULL)
+		return -EINVAL;
+
+	/* Get the DCR resource extent and sanity check the values. */
+
+	dcr_base = dcr_resource_start(np, 0);
+	dcr_len = dcr_resource_len(np, 0);
+
+	if (dcr_base == 0 || dcr_len == 0) {
+		ppc4xx_edac_printk(KERN_ERR,
+				   "Failed to obtain DCR property.\n");
+		return -ENODEV;
+	}
+
+	if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
+		ppc4xx_edac_printk(KERN_ERR,
+				   "Unexpected DCR length %d, expected %d.\n",
+				   dcr_len, SDRAM_DCR_RESOURCE_LEN);
+		return -ENODEV;
+	}
+
+	/*  Attempt to map the DCR extent. */
+
+	*dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+	if (!DCR_MAP_OK(*dcr_host)) {
+		ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * ppc4xx_edac_probe - check controller and bind driver
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller being probed for driver binding.
+ * @match: A pointer to the OpenFirmware device tree match
+ *         information associated with the controller being probed
+ *         for driver binding.
+ *
+ * This routine probes a specific ibm,sdram-4xx-ddr2 controller
+ * instance for binding with the driver.
+ *
+ * Returns 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int __devinit
+ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
+{
+	int status = 0;
+	u32 mcopt1, memcheck;
+	dcr_host_t dcr_host;
+	const struct device_node *np = op->node;
+	struct mem_ctl_info *mci = NULL;
+	static int ppc4xx_edac_instance;
+
+	/*
+	 * At this point, we only support the controller realized on
+	 * the AMCC PPC 405EX[r]. Reject anything else.
+	 */
+
+	if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
+	    !of_device_is_compatible(np, "ibm,sdram-405exr")) {
+		ppc4xx_edac_printk(KERN_NOTICE,
+				   "Only the PPC405EX[r] is supported.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Next, get the DCR property and attempt to map it so that we
+	 * can probe the controller.
+	 */
+
+	status = ppc4xx_edac_map_dcrs(np, &dcr_host);
+
+	if (status)
+		return status;
+
+	/*
+	 * First determine whether ECC is enabled at all. If not,
+	 * there is no useful checking or monitoring that can be done
+	 * for this controller.
+	 */
+
+	mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
+	memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
+
+	if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
+		ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
+				   "ECC is disabled.\n", np->full_name);
+		status = -ENODEV;
+		goto done;
+	}
+
+	/*
+	 * At this point, we know ECC is enabled, allocate an EDAC
+	 * controller instance and perform the appropriate
+	 * initialization.
+	 */
+
+	mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
+			    ppc4xx_edac_nr_csrows,
+			    ppc4xx_edac_nr_chans,
+			    ppc4xx_edac_instance);
+
+	if (mci == NULL) {
+		ppc4xx_edac_printk(KERN_ERR, "%s: "
+				   "Failed to allocate EDAC MC instance!\n",
+				   np->full_name);
+		status = -ENOMEM;
+		goto done;
+	}
+
+	status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1);
+
+	if (status) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to initialize instance!\n");
+		goto fail;
+	}
+
+	/*
+	 * We have a valid, initialized EDAC instance bound to the
+	 * controller. Attempt to register it with the EDAC subsystem
+	 * and, if necessary, register interrupts.
+	 */
+
+	if (edac_mc_add_mc(mci)) {
+		ppc4xx_edac_mc_printk(KERN_ERR, mci,
+				      "Failed to add instance!\n");
+		status = -ENODEV;
+		goto fail;
+	}
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		status = ppc4xx_edac_register_irq(op, mci);
+
+		if (status)
+			goto fail1;
+	}
+
+	ppc4xx_edac_instance++;
+
+	return 0;
+
+ fail1:
+	edac_mc_del_mc(mci->dev);
+
+ fail:
+	edac_mc_free(mci);
+
+ done:
+	return status;
+}
+
+/**
+ * ppc4xx_edac_remove - unbind driver from controller
+ * @op: A pointer to the OpenFirmware device tree node associated
+ *      with the controller this EDAC instance is to be unbound/removed
+ *      from.
+ *
+ * This routine unbinds the EDAC memory controller instance associated
+ * with the specified ibm,sdram-4xx-ddr2 controller described by the
+ * OpenFirmware device tree node passed as a parameter.
+ *
+ * Unconditionally returns 0.
+ */
+static int
+ppc4xx_edac_remove(struct of_device *op)
+{
+	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+	struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
+
+	if (edac_op_state == EDAC_OPSTATE_INT) {
+		free_irq(pdata->irqs.sec, mci);
+		free_irq(pdata->irqs.ded, mci);
+	}
+
+	dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
+
+	edac_mc_del_mc(mci->dev);
+	edac_mc_free(mci);
+
+	return 0;
+}
+
+/**
+ * ppc4xx_edac_opstate_init - initialize EDAC reporting method
+ *
+ * This routine ensures that the EDAC memory controller reporting
+ * method is mapped to a sane value, since the EDAC core sets it to
+ * EDAC_OPSTATE_INVAL by default. We don't call the global
+ * opstate_init as that defaults to polling and we want interrupt as
+ * the default.
+ */
+static inline void __init
+ppc4xx_edac_opstate_init(void)
+{
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_INT:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_INT;
+		break;
+	}
+
+	ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
+			   ((edac_op_state == EDAC_OPSTATE_POLL) ?
+			    EDAC_OPSTATE_POLL_STR :
+			    ((edac_op_state == EDAC_OPSTATE_INT) ?
+			     EDAC_OPSTATE_INT_STR :
+			     EDAC_OPSTATE_UNKNOWN_STR)));
+}
+
+/**
+ * ppc4xx_edac_init - driver/module insertion entry point
+ *
+ * This routine is the driver/module insertion entry point. It
+ * initializes the EDAC memory controller reporting state and
+ * registers the driver as an OpenFirmware device tree platform
+ * driver.
+ */
+static int __init
+ppc4xx_edac_init(void)
+{
+	ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
+
+	ppc4xx_edac_opstate_init();
+
+	return of_register_platform_driver(&ppc4xx_edac_driver);
+}
+
+/**
+ * ppc4xx_edac_exit - driver/module removal entry point
+ *
+ * This routine is the driver/module removal entry point. It
+ * unregisters the driver as an OpenFirmware device tree platform
+ * driver.
+ */
+static void __exit
+ppc4xx_edac_exit(void)
+{
+	of_unregister_platform_driver(&ppc4xx_edac_driver);
+}
+
+module_init(ppc4xx_edac_init);
+module_exit(ppc4xx_edac_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>");
+MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
+		 "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);

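All of the ppc4xx message generators above share the same accumulation pattern: each snprintf() advances the buffer, shrinks the remaining size and adds to a running total, and the function bails out as soon as a call fails or would truncate. The following self-contained userspace sketch shows that pattern in isolation; the function and item names are made up for illustration.

#include <stdio.h>

/* Append comma-separated items, stopping cleanly on error/truncation. */
static int generate_list(char *buffer, size_t size,
			 const char *const *items, unsigned int count)
{
	int n, total = 0;
	unsigned int i;

	if (size)
		buffer[0] = '\0';

	for (i = 0; i < count; i++) {
		n = snprintf(buffer, size, "%s%s", i ? ", " : "", items[i]);

		if (n < 0 || (size_t)n >= size)
			break;		/* error or out of room */

		buffer += n;
		size -= n;
		total += n;
	}

	return total;	/* characters actually written */
}

int main(void)
{
	static const char *const banks[] = { "0", "1" };
	char message[64];

	generate_list(message, sizeof(message), banks, 2);
	printf("Banks: %s\n", message);
	return 0;
}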
+ 172 - 0
drivers/edac/ppc4xx_edac.h

@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2008 Nuovation System Designs, LLC
+ *   Grant Erickson <gerickson@nuovations.com>
+ *
+ * This file defines processor mnemonics for accessing and managing
+ * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP,
+ * 440SPe, 460EX, 460GT and 460SX.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifndef __PPC4XX_EDAC_H
+#define __PPC4XX_EDAC_H
+
+#include <linux/types.h>
+
+/*
+ * Macro for generating register field mnemonics
+ */
+#define PPC_REG_BITS			32
+#define PPC_REG_VAL(bit, val)		((val) << ((PPC_REG_BITS - 1) - (bit)))
+#define PPC_REG_DECODE(bit, val)	((val) >> ((PPC_REG_BITS - 1) - (bit)))
+
+/*
+ * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those
+ * relevant to ECC)
+ */
+#define SDRAM_BESR			0x00	/* Error status (read/clear) */
+#define SDRAM_BESRT			0x01	/* Error status (test/set)   */
+#define SDRAM_BEARL			0x02	/* Error address low	     */
+#define SDRAM_BEARH			0x03	/* Error address high	     */
+#define SDRAM_WMIRQ			0x06	/* Write master (read/clear) */
+#define SDRAM_WMIRQT			0x07	/* Write master (test/set)   */
+#define SDRAM_MCOPT1			0x20	/* Controller options 1	     */
+#define SDRAM_MBXCF_BASE		0x40	/* Bank n configuration base */
+#define	SDRAM_MBXCF(n)			(SDRAM_MBXCF_BASE + (4 * (n)))
+#define SDRAM_MB0CF			SDRAM_MBXCF(0)
+#define SDRAM_MB1CF			SDRAM_MBXCF(1)
+#define SDRAM_MB2CF			SDRAM_MBXCF(2)
+#define SDRAM_MB3CF			SDRAM_MBXCF(3)
+#define SDRAM_ECCCR			0x98	/* ECC error status	     */
+#define SDRAM_ECCES			SDRAM_ECCCR
+
+/*
+ * PLB Master IDs
+ */
+#define	SDRAM_PLB_M0ID_FIRST		0
+#define	SDRAM_PLB_M0ID_ICU		SDRAM_PLB_M0ID_FIRST
+#define	SDRAM_PLB_M0ID_PCIE0		1
+#define	SDRAM_PLB_M0ID_PCIE1		2
+#define	SDRAM_PLB_M0ID_DMA		3
+#define	SDRAM_PLB_M0ID_DCU		4
+#define	SDRAM_PLB_M0ID_OPB		5
+#define	SDRAM_PLB_M0ID_MAL		6
+#define	SDRAM_PLB_M0ID_SEC		7
+#define	SDRAM_PLB_M0ID_AHB		8
+#define SDRAM_PLB_M0ID_LAST		SDRAM_PLB_M0ID_AHB
+#define SDRAM_PLB_M0ID_COUNT		(SDRAM_PLB_M0ID_LAST - \
+					 SDRAM_PLB_M0ID_FIRST + 1)
+
+/*
+ * Memory Controller Bus Error Status Register
+ */
+#define SDRAM_BESR_MASK			PPC_REG_VAL(7, 0xFF)
+#define SDRAM_BESR_M0ID_MASK		PPC_REG_VAL(3, 0xF)
+#define	SDRAM_BESR_M0ID_DECODE(n)	PPC_REG_DECODE(3, n)
+#define SDRAM_BESR_M0ID_ICU		PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU)
+#define SDRAM_BESR_M0ID_PCIE0		PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0)
+#define SDRAM_BESR_M0ID_PCIE1		PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1)
+#define SDRAM_BESR_M0ID_DMA		PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA)
+#define SDRAM_BESR_M0ID_DCU		PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU)
+#define SDRAM_BESR_M0ID_OPB		PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB)
+#define SDRAM_BESR_M0ID_MAL		PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL)
+#define SDRAM_BESR_M0ID_SEC		PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC)
+#define SDRAM_BESR_M0ID_AHB		PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB)
+#define SDRAM_BESR_M0ET_MASK		PPC_REG_VAL(6, 0x7)
+#define SDRAM_BESR_M0ET_NONE		PPC_REG_VAL(6, 0)
+#define SDRAM_BESR_M0ET_ECC		PPC_REG_VAL(6, 1)
+#define SDRAM_BESR_M0RW_MASK		PPC_REG_VAL(7, 1)
+#define SDRAM_BESR_M0RW_WRITE		PPC_REG_VAL(7, 0)
+#define SDRAM_BESR_M0RW_READ		PPC_REG_VAL(7, 1)
+
+/*
+ * Memory Controller PLB Write Master Interrupt Register
+ */
+#define SDRAM_WMIRQ_MASK		PPC_REG_VAL(8, 0x1FF)
+#define	SDRAM_WMIRQ_ENCODE(id)		PPC_REG_VAL(((id) % \
+						     SDRAM_PLB_M0ID_COUNT), 1)
+#define SDRAM_WMIRQ_ICU			PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1)
+#define SDRAM_WMIRQ_PCIE0		PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1)
+#define SDRAM_WMIRQ_PCIE1		PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1)
+#define SDRAM_WMIRQ_DMA			PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1)
+#define SDRAM_WMIRQ_DCU			PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1)
+#define SDRAM_WMIRQ_OPB			PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1)
+#define SDRAM_WMIRQ_MAL			PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1)
+#define SDRAM_WMIRQ_SEC			PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1)
+#define SDRAM_WMIRQ_AHB			PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1)
+
+/*
+ * Memory Controller Options 1 Register
+ */
+#define SDRAM_MCOPT1_MCHK_MASK	    PPC_REG_VAL(3, 0x3)	 /* ECC mask	     */
+#define SDRAM_MCOPT1_MCHK_NON	    PPC_REG_VAL(3, 0x0)	 /* No ECC gen	     */
+#define SDRAM_MCOPT1_MCHK_GEN	    PPC_REG_VAL(3, 0x2)	 /* ECC gen	     */
+#define SDRAM_MCOPT1_MCHK_CHK	    PPC_REG_VAL(3, 0x1)	 /* ECC gen and chk  */
+#define SDRAM_MCOPT1_MCHK_CHK_REP   PPC_REG_VAL(3, 0x3)	 /* ECC gen/chk/rpt  */
+#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3)
+#define SDRAM_MCOPT1_RDEN_MASK	    PPC_REG_VAL(4, 0x1)	 /* Rgstrd DIMM mask */
+#define SDRAM_MCOPT1_RDEN	    PPC_REG_VAL(4, 0x1)	 /* Rgstrd DIMM enbl */
+#define SDRAM_MCOPT1_WDTH_MASK	    PPC_REG_VAL(7, 0x1)	 /* Width mask	     */
+#define SDRAM_MCOPT1_WDTH_32	    PPC_REG_VAL(7, 0x0)	 /* 32 bits	     */
+#define SDRAM_MCOPT1_WDTH_16	    PPC_REG_VAL(7, 0x1)	 /* 16 bits	     */
+#define SDRAM_MCOPT1_DDR_TYPE_MASK  PPC_REG_VAL(11, 0x1) /* DDR type mask    */
+#define SDRAM_MCOPT1_DDR1_TYPE	    PPC_REG_VAL(11, 0x0) /* DDR1 type	     */
+#define SDRAM_MCOPT1_DDR2_TYPE	    PPC_REG_VAL(11, 0x1) /* DDR2 type	     */
+
+/*
+ * Memory Bank 0 - n Configuration Register
+ */
+#define SDRAM_MBCF_BA_MASK		PPC_REG_VAL(12, 0x1FFF)
+#define SDRAM_MBCF_SZ_MASK		PPC_REG_VAL(19, 0xF)
+#define SDRAM_MBCF_SZ_DECODE(mbxcf)	PPC_REG_DECODE(19, mbxcf)
+#define SDRAM_MBCF_SZ_4MB		PPC_REG_VAL(19, 0x0)
+#define SDRAM_MBCF_SZ_8MB		PPC_REG_VAL(19, 0x1)
+#define SDRAM_MBCF_SZ_16MB		PPC_REG_VAL(19, 0x2)
+#define SDRAM_MBCF_SZ_32MB		PPC_REG_VAL(19, 0x3)
+#define SDRAM_MBCF_SZ_64MB		PPC_REG_VAL(19, 0x4)
+#define SDRAM_MBCF_SZ_128MB		PPC_REG_VAL(19, 0x5)
+#define SDRAM_MBCF_SZ_256MB		PPC_REG_VAL(19, 0x6)
+#define SDRAM_MBCF_SZ_512MB		PPC_REG_VAL(19, 0x7)
+#define SDRAM_MBCF_SZ_1GB		PPC_REG_VAL(19, 0x8)
+#define SDRAM_MBCF_SZ_2GB		PPC_REG_VAL(19, 0x9)
+#define SDRAM_MBCF_SZ_4GB		PPC_REG_VAL(19, 0xA)
+#define SDRAM_MBCF_SZ_8GB		PPC_REG_VAL(19, 0xB)
+#define SDRAM_MBCF_AM_MASK		PPC_REG_VAL(23, 0xF)
+#define SDRAM_MBCF_AM_MODE0		PPC_REG_VAL(23, 0x0)
+#define SDRAM_MBCF_AM_MODE1		PPC_REG_VAL(23, 0x1)
+#define SDRAM_MBCF_AM_MODE2		PPC_REG_VAL(23, 0x2)
+#define SDRAM_MBCF_AM_MODE3		PPC_REG_VAL(23, 0x3)
+#define SDRAM_MBCF_AM_MODE4		PPC_REG_VAL(23, 0x4)
+#define SDRAM_MBCF_AM_MODE5		PPC_REG_VAL(23, 0x5)
+#define SDRAM_MBCF_AM_MODE6		PPC_REG_VAL(23, 0x6)
+#define SDRAM_MBCF_AM_MODE7		PPC_REG_VAL(23, 0x7)
+#define SDRAM_MBCF_AM_MODE8		PPC_REG_VAL(23, 0x8)
+#define SDRAM_MBCF_AM_MODE9		PPC_REG_VAL(23, 0x9)
+#define SDRAM_MBCF_BE_MASK		PPC_REG_VAL(31, 0x1)
+#define SDRAM_MBCF_BE_DISABLE		PPC_REG_VAL(31, 0x0)
+#define SDRAM_MBCF_BE_ENABLE		PPC_REG_VAL(31, 0x1)
+
+/*
+ * ECC Error Status
+ */
+#define SDRAM_ECCES_MASK		PPC_REG_VAL(21, 0x3FFFFF)
+#define SDRAM_ECCES_BNCE_MASK		PPC_REG_VAL(15, 0xFFFF)
+#define SDRAM_ECCES_BNCE_ENCODE(lane)	PPC_REG_VAL(((lane) & 0xF), 1)
+#define SDRAM_ECCES_CKBER_MASK		PPC_REG_VAL(17, 0x3)
+#define SDRAM_ECCES_CKBER_NONE		PPC_REG_VAL(17, 0)
+#define SDRAM_ECCES_CKBER_16_ECC_0_3	PPC_REG_VAL(17, 2)
+#define SDRAM_ECCES_CKBER_32_ECC_0_3	PPC_REG_VAL(17, 1)
+#define SDRAM_ECCES_CKBER_32_ECC_4_8	PPC_REG_VAL(17, 2)
+#define SDRAM_ECCES_CKBER_32_ECC_0_8	PPC_REG_VAL(17, 3)
+#define SDRAM_ECCES_CE			PPC_REG_VAL(18, 1)
+#define SDRAM_ECCES_UE			PPC_REG_VAL(19, 1)
+#define SDRAM_ECCES_BKNER_MASK		PPC_REG_VAL(21, 0x3)
+#define SDRAM_ECCES_BK0ER		PPC_REG_VAL(20, 1)
+#define SDRAM_ECCES_BK1ER		PPC_REG_VAL(21, 1)
+
+#endif /* __PPC4XX_EDAC_H */
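
The PPC_REG_VAL()/PPC_REG_DECODE() helpers above use IBM's big-endian bit numbering: bit 0 is the most-significant bit of the 32-bit register, and the named bit is the least-significant bit of the field. A minimal host-side sketch (not part of the patch) that checks the mapping behind SDRAM_BESR_M0ID_MASK and SDRAM_BESR_M0ID_DECODE():

#include <assert.h>
#include <stdint.h>

/* Local copies of the helpers from ppc4xx_edac.h, for a host-side check. */
#define PPC_REG_BITS		32
#define PPC_REG_VAL(bit, val)	((val) << ((PPC_REG_BITS - 1) - (bit)))
#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit)))

int main(void)
{
	/* IBM bits 0..3 form the top nibble: PPC_REG_VAL(3, 0xF) == 0xF0000000. */
	assert(PPC_REG_VAL(3, 0xFu) == 0xF0000000u);

	/* Encode PLB master 5 (the OPB) into the BESR M0ID field, decode it back. */
	uint32_t besr = PPC_REG_VAL(3, 5u);
	assert(PPC_REG_DECODE(3, besr & PPC_REG_VAL(3, 0xFu)) == 5u);
	return 0;
}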

+ 14 - 5
drivers/gpio/gpiolib.c

@@ -69,20 +69,24 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
  * those calls have no teeth) we can't avoid autorequesting.  This nag
  * message should motivate switching to explicit requests... so should
  * the weaker cleanup after faults, compared to gpio_request().
+ *
+ * NOTE: the autorequest mechanism is going away; at this point it's
+ * only "legal" in the sense that (old) code using it won't break yet,
+ * but instead only triggers a WARN() stack dump.
  */
 static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
 {
-	if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
-		struct gpio_chip *chip = desc->chip;
-		int gpio = chip->base + offset;
+	const struct gpio_chip *chip = desc->chip;
+	const int gpio = chip->base + offset;
 
+	if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
+			"autorequest GPIO-%d\n", gpio)) {
 		if (!try_module_get(chip->owner)) {
 			pr_err("GPIO-%d: module can't be gotten \n", gpio);
 			clear_bit(FLAG_REQUESTED, &desc->flags);
 			/* lose */
 			return -EIO;
 		}
-		pr_warning("GPIO-%d autorequested\n", gpio);
 		desc_set_label(desc, "[auto]");
 		/* caller must chip->request() w/o spinlock */
 		if (chip->request)
@@ -438,6 +442,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
 	unsigned long		flags;
 	struct gpio_desc	*desc;
 	int			status = -EINVAL;
+	char			*ioname = NULL;
 
 	/* can't export until sysfs is available ... */
 	if (!gpio_class.p) {
@@ -461,11 +466,14 @@ int gpio_export(unsigned gpio, bool direction_may_change)
 	}
 	spin_unlock_irqrestore(&gpio_lock, flags);
 
+	if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
+		ioname = desc->chip->names[gpio - desc->chip->base];
+
 	if (status == 0) {
 		struct device	*dev;
 
 		dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
-					desc, "gpio%d", gpio);
+				    desc, ioname ? ioname : "gpio%d", gpio);
 		if (dev) {
 			if (direction_may_change)
 				status = sysfs_create_group(&dev->kobj,
@@ -513,6 +521,7 @@ void gpio_unexport(unsigned gpio)
 	mutex_lock(&sysfs_lock);
 
 	desc = &gpio_desc[gpio];
+
 	if (test_bit(FLAG_EXPORT, &desc->flags)) {
 		struct device	*dev = NULL;
 

+ 29 - 2
drivers/gpu/drm/drm_crtc_helper.c

@@ -42,6 +42,26 @@ static struct drm_display_mode std_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
 
+static void drm_mode_validate_flag(struct drm_connector *connector,
+				   int flags)
+{
+	struct drm_display_mode *mode, *t;
+
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+		return;
+
+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
+		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+				!(flags & DRM_MODE_FLAG_INTERLACE))
+			mode->status = MODE_NO_INTERLACE;
+		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+				!(flags & DRM_MODE_FLAG_DBLSCAN))
+			mode->status = MODE_NO_DBLESCAN;
+	}
+
+	return;
+}
+
 /**
  * drm_helper_probe_connector_modes - get complete set of display modes
  * @dev: DRM device
@@ -72,6 +92,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	struct drm_connector_helper_funcs *connector_funcs =
 		connector->helper_private;
 	int count = 0;
+	int mode_flags = 0;
 
 	DRM_DEBUG("%s\n", drm_get_connector_name(connector));
 	/* set all modes to the unverified state */
@@ -96,6 +117,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	if (maxX && maxY)
 		drm_mode_validate_size(dev, &connector->modes, maxX,
 				       maxY, 0);
+
+	if (connector->interlace_allowed)
+		mode_flags |= DRM_MODE_FLAG_INTERLACE;
+	if (connector->doublescan_allowed)
+		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	drm_mode_validate_flag(connector, mode_flags);
+
 	list_for_each_entry_safe(mode, t, &connector->modes, head) {
 		if (mode->status == MODE_OK)
 			mode->status = connector_funcs->mode_valid(connector,
@@ -885,7 +913,6 @@ bool drm_helper_plugged_event(struct drm_device *dev)
 /**
  * drm_initial_config - setup a sane initial connector configuration
  * @dev: DRM device
- * @can_grow: this configuration is growable
  *
  * LOCKING:
  * Called at init time, must take mode config lock.
@@ -897,7 +924,7 @@ bool drm_helper_plugged_event(struct drm_device *dev)
  * RETURNS:
  * Zero if everything went ok, nonzero otherwise.
  */
-bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
+bool drm_helper_initial_config(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	int count = 0;
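
Whether drm_mode_validate_flag() prunes anything is decided by the capability flags a connector driver sets at init time: probing now maps interlaced and doublescan modes to MODE_NO_INTERLACE / MODE_NO_DBLESCAN whenever the connector does not advertise support for them. A hypothetical connector-init fragment (not from this patch) illustrating the relationship:

#include "drm_crtc.h"	/* assumed include path within drivers/gpu/drm */

/* Hypothetical setup: this connector can scan out interlaced modes but
 * not doublescan modes, so probing keeps the former and prunes the latter. */
static void example_connector_init(struct drm_connector *connector)
{
	connector->interlace_allowed = true;	/* keep DRM_MODE_FLAG_INTERLACE modes */
	connector->doublescan_allowed = false;	/* DBLSCAN modes -> MODE_NO_DBLESCAN */
}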

+ 3 - 5
drivers/gpu/drm/drm_edid.c

@@ -125,10 +125,8 @@ static bool edid_is_valid(struct edid *edid)
 		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
 		goto bad;
 	}
-	if (edid->revision > 3) {
-		DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
-		goto bad;
-	}
+	if (edid->revision > 4)
+		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
 
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
@@ -162,7 +160,7 @@ static bool edid_vendor(struct edid *edid, char *vendor)
 	edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
 	edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
 			  ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
-	edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@';
+	edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
 
 	return !strncmp(edid_vendor, vendor, 3);
 }
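
The mfg_id indexing fix above follows from the EDID layout: the three-letter PNP vendor code is packed into only two bytes, five bits per letter with 'A' encoded as 1, so the third letter lives in the low five bits of mfg_id[1] rather than in a separate third byte. A small host-side sketch (not part of the patch) that round-trips a sample vendor ID through the corrected decode:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Pack "IBM" the way EDID does: 5 bits per letter, 'A' encoded as 1. */
	uint8_t mfg_id[2];
	mfg_id[0] = (('I' - '@') << 2) | (('B' - '@') >> 3);
	mfg_id[1] = ((('B' - '@') & 0x7) << 5) | ('M' - '@');

	/* Decode with the corrected logic from edid_vendor(). */
	char v[3];
	v[0] = ((mfg_id[0] & 0x7c) >> 2) + '@';
	v[1] = (((mfg_id[0] & 0x3) << 3) | ((mfg_id[1] & 0xe0) >> 5)) + '@';
	v[2] = (mfg_id[1] & 0x1f) + '@';

	assert(v[0] == 'I' && v[1] == 'B' && v[2] == 'M');
	return 0;
}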

+ 1 - 6
drivers/gpu/drm/drm_gem.c

@@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
-	unsigned long prot;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
@@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 	vma->vm_private_data = map->handle;
 	/* FIXME: use pgprot_writecombine when available */
-	prot = pgprot_val(vma->vm_page_prot);
-#ifdef CONFIG_X86
-	prot |= _PAGE_CACHE_WC;
-#endif
-	vma->vm_page_prot = __pgprot(prot);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.

+ 1 - 0
drivers/gpu/drm/drm_sysfs.c

@@ -451,6 +451,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
 }
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
 /**
  * drm_sysfs_device_add - adds a class device to sysfs for a character driver

+ 2 - 9
drivers/gpu/drm/i915/i915_dma.c

@@ -922,7 +922,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev))
+	if (IS_G4X(dev) || IS_IGD(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1030,13 +1030,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto destroy_ringbuffer;
 
-	/* FIXME: re-add hotplug support */
-#if 0
-	ret = drm_hotplug_init(dev);
-	if (ret)
-		goto destroy_ringbuffer;
-#endif
-
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
@@ -1049,7 +1042,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_modeset_init(dev);
 
-	drm_helper_initial_config(dev, false);
+	drm_helper_initial_config(dev);
 
 	return 0;
 

Some files were not shown because too many files changed in this diff