
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

David Woodhouse, 20 years ago
parent
commit
bfd4bda097
100 changed files with 7053 additions and 546 deletions
  1. Documentation/aoe/aoe.txt (+36 -4)
  2. Documentation/aoe/status.sh (+0 -4)
  3. Documentation/pci.txt (+1 -0)
  4. Documentation/power/pci.txt (+2 -33)
  5. arch/alpha/Kconfig (+4 -0)
  6. arch/arm/Kconfig (+4 -0)
  7. arch/arm/boot/compressed/head.S (+22 -83)
  8. arch/arm/mach-imx/generic.c (+16 -0)
  9. arch/arm/mach-integrator/core.c (+4 -2)
  10. arch/arm/mach-integrator/leds.c (+1 -1)
  11. arch/arm/mach-ixp4xx/common-pci.c (+0 -10)
  12. arch/arm/mm/Kconfig (+1 -0)
  13. arch/arm26/Kconfig (+4 -0)
  14. arch/i386/Kconfig (+4 -0)
  15. arch/ia64/Kconfig (+19 -0)
  16. arch/ia64/configs/tiger_defconfig (+57 -39)
  17. arch/ia64/hp/common/sba_iommu.c (+4 -30)
  18. arch/ia64/kernel/acpi.c (+5 -18)
  19. arch/ia64/kernel/entry.S (+1 -1)
  20. arch/ia64/kernel/mca_drv.c (+2 -2)
  21. arch/ia64/kernel/mca_drv_asm.S (+15 -3)
  22. arch/ia64/kernel/perfmon.c (+33 -10)
  23. arch/ia64/kernel/process.c (+36 -19)
  24. arch/ia64/kernel/signal.c (+1 -1)
  25. arch/ia64/lib/flush.S (+3 -3)
  26. arch/ia64/lib/memcpy_mck.S (+1 -1)
  27. arch/ia64/lib/memset.S (+1 -1)
  28. arch/ia64/sn/kernel/Makefile (+6 -1)
  29. arch/ia64/sn/kernel/io_init.c (+6 -4)
  30. arch/ia64/sn/kernel/mca.c (+21 -13)
  31. arch/ia64/sn/kernel/setup.c (+24 -16)
  32. arch/ia64/sn/kernel/tiocx.c (+32 -28)
  33. arch/ia64/sn/kernel/xp_main.c (+289 -0)
  34. arch/ia64/sn/kernel/xp_nofault.S (+31 -0)
  35. arch/ia64/sn/kernel/xpc.h (+991 -0)
  36. arch/ia64/sn/kernel/xpc_channel.c (+2297 -0)
  37. arch/ia64/sn/kernel/xpc_main.c (+1064 -0)
  38. arch/ia64/sn/kernel/xpc_partition.c (+984 -0)
  39. arch/ia64/sn/kernel/xpnet.c (+715 -0)
  40. arch/ia64/sn/pci/pcibr/pcibr_dma.c (+1 -1)
  41. arch/ia64/sn/pci/tioca_provider.c (+1 -1)
  42. arch/m68knommu/Kconfig (+5 -0)
  43. arch/mips/Kconfig (+4 -0)
  44. arch/parisc/Kconfig (+4 -0)
  45. arch/ppc/Kconfig (+4 -0)
  46. arch/ppc64/Kconfig (+3 -0)
  47. arch/ppc64/Makefile (+7 -0)
  48. arch/sh/Kconfig (+4 -0)
  49. arch/sparc/prom/memory.c (+14 -14)
  50. arch/sparc/prom/sun4prom.c (+1 -1)
  51. arch/sparc64/kernel/irq.c (+3 -3)
  52. arch/x86_64/Kconfig (+5 -0)
  53. drivers/base/bus.c (+2 -3)
  54. drivers/base/core.c (+1 -1)
  55. drivers/block/Kconfig (+1 -1)
  56. drivers/block/aoe/aoe.h (+1 -1)
  57. drivers/block/aoe/aoeblk.c (+13 -0)
  58. drivers/block/aoe/aoedev.c (+4 -7)
  59. drivers/block/aoe/aoenet.c (+16 -1)
  60. drivers/char/Kconfig (+3 -3)
  61. drivers/char/ipmi/ipmi_si_intf.c (+16 -16)
  62. drivers/char/ipmi/ipmi_si_sm.h (+1 -1)
  63. drivers/char/mbcs.c (+2 -2)
  64. drivers/char/mbcs.h (+2 -2)
  65. drivers/char/sonypi.c (+3 -3)
  66. drivers/mmc/Kconfig (+1 -1)
  67. drivers/net/Kconfig (+5 -5)
  68. drivers/net/appletalk/Kconfig (+1 -1)
  69. drivers/net/hamradio/Kconfig (+2 -2)
  70. drivers/net/irda/Kconfig (+5 -5)
  71. drivers/net/ppp_deflate.c (+2 -4)
  72. drivers/net/ppp_generic.c (+4 -8)
  73. drivers/net/wan/Kconfig (+3 -3)
  74. drivers/net/wan/cycx_x25.c (+2 -6)
  75. drivers/net/wan/pc300_tty.c (+11 -16)
  76. drivers/net/wan/sdla_chdlc.c (+4 -9)
  77. drivers/net/wan/x25_asy.c (+6 -14)
  78. drivers/parport/Kconfig (+1 -1)
  79. drivers/parport/parport_pc.c (+24 -10)
  80. drivers/pci/hotplug/ibmphp.h (+1 -1)
  81. drivers/pci/hotplug/ibmphp_hpc.c (+3 -3)
  82. drivers/pci/hotplug/ibmphp_pci.c (+5 -2)
  83. drivers/pci/hotplug/pci_hotplug.h (+1 -1)
  84. drivers/pci/hotplug/pciehp_core.c (+18 -5)
  85. drivers/pci/hotplug/pcihp_skeleton.c (+1 -1)
  86. drivers/pci/msi.c (+3 -3)
  87. drivers/pci/pci-acpi.c (+1 -1)
  88. drivers/pci/pci-driver.c (+10 -1)
  89. drivers/pci/pci-sysfs.c (+58 -24)
  90. drivers/pci/pci.c (+4 -16)
  91. drivers/pci/probe.c (+1 -0)
  92. drivers/pci/proc.c (+1 -0)
  93. drivers/pci/quirks.c (+2 -0)
  94. drivers/scsi/Kconfig (+6 -6)
  95. drivers/usb/core/message.c (+2 -2)
  96. drivers/usb/core/urb.c (+3 -3)
  97. drivers/usb/gadget/ether.c (+1 -1)
  98. drivers/usb/gadget/inode.c (+1 -1)
  99. drivers/usb/gadget/lh7a40x_udc.c (+1 -1)
  100. drivers/usb/gadget/serial.c (+1 -1)

+ 36 - 4
Documentation/aoe/aoe.txt

@@ -4,6 +4,16 @@ The EtherDrive (R) HOWTO for users of 2.6 kernels is found at ...
 
 
  It has many tips and hints!

+The aoetools are userland programs that are designed to work with this
+driver.  The aoetools are on sourceforge.
+
+  http://aoetools.sourceforge.net/
+
+The scripts in this Documentation/aoe directory are intended to
+document the use of the driver and are not necessary if you install
+the aoetools.
+
+
CREATING DEVICE NODES

  Users of udev should find the block device nodes created
@@ -35,14 +45,15 @@ USING DEVICE NODES
 
 
   "echo eth2 eth4 > /dev/etherd/interfaces" tells the aoe driver to
   "echo eth2 eth4 > /dev/etherd/interfaces" tells the aoe driver to
   limit ATA over Ethernet traffic to eth2 and eth4.  AoE traffic from
   limit ATA over Ethernet traffic to eth2 and eth4.  AoE traffic from
-  untrusted networks should be ignored as a matter of security.
+  untrusted networks should be ignored as a matter of security.  See
+  also the aoe_iflist driver option described below.
 
 
   "echo > /dev/etherd/discover" tells the driver to find out what AoE
   "echo > /dev/etherd/discover" tells the driver to find out what AoE
   devices are available.
   devices are available.
 
 
   These character devices may disappear and be replaced by sysfs
   These character devices may disappear and be replaced by sysfs
-  counterparts, so distribution maintainers are encouraged to create
-  scripts that use these devices.
+  counterparts.  Using the commands in aoetools insulates users from
+  these implementation details.
 
 
   The block devices are named like this:
   The block devices are named like this:
 
 
@@ -66,7 +77,8 @@ USING SYSFS
  through which we are communicating with the remote AoE device.

  There is a script in this directory that formats this information
-  in a convenient way.
+  in a convenient way.  Users with aoetools can use the aoe-stat
+  command.

  root@makki root# sh Documentation/aoe/status.sh
     e10.0            eth3              up
@@ -89,3 +101,23 @@ USING SYSFS
      e4.7            eth1              up
      e4.8            eth1              up
      e4.9            eth1              up
+
+  Use /sys/module/aoe/parameters/aoe_iflist (or better, the driver
+  option discussed below) instead of /dev/etherd/interfaces to limit
+  AoE traffic to the network interfaces in the given
+  whitespace-separated list.  Unlike the old character device, the
+  sysfs entry can be read from as well as written to.
+
+  It's helpful to trigger discovery after setting the list of allowed
+  interfaces.  The aoetools package provides an aoe-discover script
+  for this purpose.  You can also directly use the
+  /dev/etherd/discover special file described above.
+
+DRIVER OPTIONS
+
+  There is a boot option for the built-in aoe driver and a
+  corresponding module parameter, aoe_iflist.  Without this option,
+  all network interfaces may be used for ATA over Ethernet.  Here is a
+  usage example for the module parameter.
+
+    modprobe aoe_iflist="eth1 eth3"
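
For readers who script against the new interface, below is a minimal C sketch of the same sequence the echo commands above perform; the two paths come straight from this documentation, while the helper name and error handling are illustrative only.

```c
#include <stdio.h>
#include <stdlib.h>

/* write_ctl() is a hypothetical helper, not part of the aoe driver:
 * it just writes a string to one of the control files named above. */
static void write_ctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f == NULL) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* limit AoE traffic to eth1 and eth3, then trigger discovery */
	write_ctl("/sys/module/aoe/parameters/aoe_iflist", "eth1 eth3");
	write_ctl("/dev/etherd/discover", "\n");
	return 0;
}
```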

+ 0 - 4
Documentation/aoe/status.sh

@@ -14,10 +14,6 @@ test ! -d "$sysd/block" && {
 	echo "$me Error: sysfs is not mounted" 1>&2
 	echo "$me Error: sysfs is not mounted" 1>&2
 	exit 1
 	exit 1
 }
 }
-test -z "`lsmod | grep '^aoe'`" && {
-	echo  "$me Error: aoe module is not loaded" 1>&2
-	exit 1
-}
 
 
 for d in `ls -d $sysd/block/etherd* 2>/dev/null | grep -v p` end; do
 for d in `ls -d $sysd/block/etherd* 2>/dev/null | grep -v p` end; do
 	# maybe ls comes up empty, so we use "end"
 	# maybe ls comes up empty, so we use "end"

+ 1 - 0
Documentation/pci.txt

@@ -279,6 +279,7 @@ pci_for_each_dev_reverse()	Superseded by pci_find_device_reverse()
pci_for_each_bus()		Superseded by pci_find_next_bus()
pci_find_device()		Superseded by pci_get_device()
pci_find_subsys()		Superseded by pci_get_subsys()
+pci_find_slot()			Superseded by pci_get_slot()
pcibios_find_class()		Superseded by pci_get_class()
pci_find_class()		Superseded by pci_get_class()
pci_(read|write)_*_nodev()	Superseded by pci_bus_(read|write)_*()
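
The new pci_find_slot() entry matters because its replacement behaves differently: pci_get_slot() returns a referenced struct pci_dev that must be released with pci_dev_put(). A minimal migration sketch, assuming the caller already holds the struct pci_bus it wants to search (the function name is hypothetical):

```c
#include <linux/pci.h>

/* Hypothetical example: probe device 0, function 0 on a given bus.
 * pci_get_slot() takes a reference on the returned device, so the
 * caller must drop it with pci_dev_put() when finished. */
static int check_first_slot(struct pci_bus *bus)
{
	struct pci_dev *dev;
	u16 vendor;

	dev = pci_get_slot(bus, PCI_DEVFN(0, 0));
	if (!dev)
		return -ENODEV;

	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
	pci_dev_put(dev);	/* release the reference */
	return vendor;
}
```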

+ 2 - 33
Documentation/power/pci.txt

@@ -165,40 +165,9 @@ Description:
These functions are intended for use by individual drivers, and are defined in 
struct pci_driver:

-        int  (*save_state) (struct pci_dev *dev, u32 state);
-        int  (*suspend) (struct pci_dev *dev, u32 state);
+        int  (*suspend) (struct pci_dev *dev, pm_message_t state);
        int  (*resume) (struct pci_dev *dev);
-        int  (*enable_wake) (struct pci_dev *dev, u32 state, int enable);
-
-
-save_state
-----------
-
-Usage:
-
-if (dev->driver && dev->driver->save_state)
-	dev->driver->save_state(dev,state);
-
-The driver should use this callback to save device state. It should take into
-account the current state of the device and the requested state in order to
-avoid any unnecessary operations.
-
-For example, a video card that supports all 4 states (D0-D3), all controller
-context is preserved when entering D1, but the screen is placed into a low power
-state (blanked). 
-
-The driver can also interpret this function as a notification that it may be
-entering a sleep state in the near future. If it knows that the device cannot
-enter the requested state, either because of lack of support for it, or because
-the device is middle of some critical operation, then it should fail.
-
-This function should not be used to set any state in the device or the driver
-because the device may not actually enter the sleep state (e.g. another driver
-later causes causes a global state transition to fail).
-
-Note that in intermediate low power states, a device's I/O and memory spaces may
-be disabled and may not be available in subsequent transitions to lower power
-states.
+        int  (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable);
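
To make the updated prototypes concrete, here is a minimal sketch of a driver filling them in; only the two callback signatures are taken from the text above, and the driver name and empty bodies are placeholders.

```c
#include <linux/pci.h>

/* mydrv is a hypothetical driver; the callback prototypes match the
 * definitions quoted above, the bodies are placeholders. */
static int mydrv_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* quiesce the device and save any volatile state here */
	return 0;
}

static int mydrv_resume(struct pci_dev *dev)
{
	/* restore state and restart the device here */
	return 0;
}

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.suspend	= mydrv_suspend,
	.resume		= mydrv_resume,
};
```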
 
 
 
 
suspend

+ 4 - 0
arch/alpha/Kconfig

@@ -280,6 +280,10 @@ config ISA
	  (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
	  newer boards don't support it.  If you have ISA, say Y, otherwise N.

+config ISA_DMA_API
+	bool
+	default y
+
config PCI
	bool
	depends on !ALPHA_JENSEN

+ 4 - 0
arch/arm/Kconfig

@@ -266,6 +266,10 @@ config ISA_DMA
	depends on FOOTBRIDGE_HOST || ARCH_SHARK
	default y

+config ISA_DMA_API
+	bool
+	default y
+
config PCI
	bool "PCI support" if ARCH_INTEGRATOR_AP
	default y if ARCH_SHARK || FOOTBRIDGE_HOST || ARCH_IOP3XX || ARCH_IXP4XX || ARCH_IXP2000

+ 22 - 83
arch/arm/boot/compressed/head.S

@@ -18,48 +18,30 @@
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG
-#if defined(CONFIG_DEBUG_DC21285_PORT)
-		.macro	loadsp, rb
-		mov	\rb, #0x42000000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0x160]
-		.endm
-#elif defined(CONFIG_DEBUG_ICEDCC)
+
+#include <asm/arch/debug-macro.S>
+
+#if defined(CONFIG_DEBUG_ICEDCC)
		.macro	loadsp, rb
		.endm
-		.macro writeb, rb
-		mcr	p14, 0, \rb, c0, c1, 0
-		.endm
-#elif defined(CONFIG_FOOTBRIDGE)
-		.macro	loadsp,	rb
-		mov	\rb, #0x7c000000
+		.macro writeb, ch, rb
+		mcr	p14, 0, \ch, c0, c1, 0
		.endm
-		.macro	writeb,	rb
-		strb	\rb, [r3, #0x3f8]
+#else
+		.macro	writeb,	ch, rb
+		senduart \ch, \rb
		.endm
-#elif defined(CONFIG_ARCH_RPC)
+
+#if defined(CONFIG_FOOTBRIDGE) || \
+    defined(CONFIG_ARCH_RPC) || \
+    defined(CONFIG_ARCH_INTEGRATOR) || \
+    defined(CONFIG_ARCH_PXA) || \
+    defined(CONFIG_ARCH_IXP4XX) || \
+    defined(CONFIG_ARCH_IXP2000) || \
+    defined(CONFIG_ARCH_LH7A40X) || \
+    defined(CONFIG_ARCH_OMAP)
		.macro	loadsp,	rb
-		mov	\rb, #0x03000000
-		orr	\rb, \rb, #0x00010000
-		.endm
-		.macro	writeb,	rb
-		strb	\rb, [r3, #0x3f8 << 2]
-		.endm
-#elif defined(CONFIG_ARCH_INTEGRATOR)
-		.macro	loadsp, rb
-		mov	\rb, #0x16000000
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_PXA) /* Xscale-type */
-		.macro 	loadsp, rb
-		mov	\rb, #0x40000000
-		orr	\rb, \rb, #0x00100000
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
+		addruart \rb
		.endm
#elif defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb
@@ -70,64 +52,21 @@
		add	\rb, \rb, #0x00010000	@ Ser1
#  endif
		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0x14]	@ UTDR
-		.endm
-#elif defined(CONFIG_ARCH_IXP4XX)
-		.macro	loadsp, rb
-		mov	\rb, #0xc8000000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-#elif defined(CONFIG_ARCH_IXP2000)
-		.macro	loadsp, rb
-		mov	\rb, #0xc0000000
-		orr	\rb, \rb, #0x00030000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_LH7A40X)
-		.macro	loadsp, rb
-		ldr	\rb, =0x80000700	@ UART2 UARTBASE
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_OMAP)
-		.macro  loadsp, rb
-		mov	\rb, #0xff000000	@ physical base address
-		add	\rb, \rb, #0x00fb0000
-#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3)
-		add	\rb, \rb, #0x00000800
-#endif
-#ifdef CONFIG_OMAP_LL_DEBUG_UART3
-		add	\rb, \rb, #0x00009000
-#endif
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3]
-		.endm
#elif defined(CONFIG_ARCH_IOP331)
		.macro loadsp, rb
                mov   	\rb, #0xff000000
                orr     \rb, \rb, #0x00ff0000
                orr     \rb, \rb, #0x0000f700   @ location of the UART
		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-		.endm
#elif defined(CONFIG_ARCH_S3C2410)
-			.macro loadsp, rb
+		.macro loadsp, rb
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C2410_LOWLEVEL_UART_PORT
		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0x20]
-		.endm
#else
#error no serial architecture defined
#endif
+#endif
#endif

		.macro	kputc,val
@@ -734,7 +673,7 @@ puts:		loadsp	r3
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
-2:		writeb	r2
+2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b

+ 16 - 0
arch/arm/mach-imx/generic.c

@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <asm/arch/imxfb.h>
#include <asm/hardware.h>

#include <asm/mach/map.h>
@@ -228,6 +229,14 @@ static struct platform_device imx_uart2_device = {
	.resource	= imx_uart2_resources,
};

+static struct imxfb_mach_info imx_fb_info;
+
+void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
+{
+	memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info));
+}
+EXPORT_SYMBOL(set_imx_fb_info);
+
static struct resource imxfb_resources[] = {
	[0] = {
		.start	= 0x00205000,
@@ -241,9 +250,16 @@ static struct resource imxfb_resources[] = {
	},
};

+static u64 fb_dma_mask = ~(u64)0;
+
static struct platform_device imxfb_device = {
	.name		= "imx-fb",
	.id		= 0,
+	.dev		= {
+ 		.platform_data	= &imx_fb_info,
+		.dma_mask	= &fb_dma_mask,
+		.coherent_dma_mask = 0xffffffff,
+	},
	.num_resources	= ARRAY_SIZE(imxfb_resources),
	.resource	= imxfb_resources,
};
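
The exported hook above gives board code a way to hand the frame buffer driver its panel description as platform data. A hedged sketch of how a board file might use it; the members of struct imxfb_mach_info are not shown in this commit, so they are left out here:

```c
#include <linux/init.h>
#include <asm/arch/imxfb.h>

/* myboard_fb_info is hypothetical board-specific data; the structure
 * members (panel timings etc.) live in asm/arch/imxfb.h and are
 * omitted because this commit does not show them. */
static struct imxfb_mach_info myboard_fb_info = {
	/* panel timings and control bits would be filled in here */
};

static void __init myboard_init(void)
{
	/* copied into the imx-fb platform_data shown above */
	set_imx_fb_info(&myboard_fb_info);
}
```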

+ 4 - 2
arch/arm/mach-integrator/core.c

@@ -216,7 +216,9 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 
	write_seqlock(&xtime_lock);

-	// ...clear the interrupt
+	/*
+	 * clear the interrupt
+	 */
	timer1->TimerClear = 1;

	timer_tick(regs);
@@ -264,7 +266,7 @@ void __init integrator_time_init(unsigned long reload, unsigned int ctrl)
	timer1->TimerValue   = timer_reload;
	timer1->TimerControl = timer_ctrl;

-	/* 
+	/*
	 * Make irqs happen for the system timer
	 */
	setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);

+ 1 - 1
arch/arm/mach-integrator/leds.c

@@ -37,7 +37,7 @@ static void integrator_leds_event(led_event_t ledevt)
	unsigned long flags;
	const unsigned int dbg_base = IO_ADDRESS(INTEGRATOR_DBG_BASE);
	unsigned int update_alpha_leds;
-	
+
	// yup, change the LEDs
	local_irq_save(flags);
	update_alpha_leds = 0;

+ 0 - 10
arch/arm/mach-ixp4xx/common-pci.c

@@ -501,15 +501,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
	return -EIO;
}

-int
-pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (mask >= SZ_64M - 1 )
-		return 0;
-
-	return -EIO;
-}
-
int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
@@ -520,7 +511,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
}

EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);

+ 1 - 0
arch/arm/mm/Kconfig

@@ -413,6 +413,7 @@ config CPU_BPREDICT_DISABLE
config HAS_TLS_REG
	bool
	depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3
+	default y
	help
	  This selects support for the CP15 thread register.
	  It is defined to be available on ARMv6 or later.  However

+ 4 - 0
arch/arm26/Kconfig

@@ -89,6 +89,10 @@ config PAGESIZE_16
          machine with 4MB of memory.
endmenu

+config ISA_DMA_API
+	bool
+	default y
+
menu "General setup"

# Compressed boot loader in ROM.  Yes, we really want to ask about

+ 4 - 0
arch/i386/Kconfig

@@ -1173,6 +1173,10 @@ source "drivers/pci/pcie/Kconfig"
 
 
 source "drivers/pci/Kconfig"
 source "drivers/pci/Kconfig"
 
 
+config ISA_DMA_API
+	bool
+	default y
+
 config ISA
 config ISA
 	bool "ISA support"
 	bool "ISA support"
 	depends on !(X86_VOYAGER || X86_VISWS)
 	depends on !(X86_VOYAGER || X86_VISWS)

+ 19 - 0
arch/ia64/Kconfig

@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
	  If you are compiling a kernel that will run under SGI's IA-64
	  simulator (Medusa) then say Y, otherwise say N.

+config IA64_SGI_SN_XP
+	tristate "Support communication between SGI SSIs"
+	depends on MSPEC
+	help
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others.  Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
config FORCE_MAX_ZONEORDER
	int
	default "18"
@@ -261,6 +271,15 @@ config HOTPLUG_CPU
	  can be controlled through /sys/devices/system/cpu/cpu#.
	  Say N if you want to disable CPU hotplug.

+config SCHED_SMT
+	bool "SMT scheduler support"
+	depends on SMP
+	default off
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  Intel IA64 chips with MultiThreading at a cost of slightly increased
+	  overhead in some places. If unsure say N here.
+
config PREEMPT
	bool "Preemptible Kernel"
        help

+ 57 - 39
arch/ia64/configs/tiger_defconfig

@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11-rc2
-# Sat Jan 22 11:17:02 2005
+# Linux kernel version: 2.6.12-rc3
+# Tue May  3 15:55:04 2005
#

#
@@ -10,6 +10,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32

#
# General setup
@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=20
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+# CONFIG_CPUSETS is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0

#
# Loadable module support
@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
+# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
+# CONFIG_PCI_DEBUG is not set

#
# PCI Hotplug Support
@@ -151,10 +157,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m
#
# CONFIG_PCCARD is not set

-#
-# PC-card bridges
-#
-
#
# Device Drivers
#
@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
-CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set

@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_IPS is not set
@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y
@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m
CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
+# CONFIG_DM_MULTIPATH is not set

#
# Fusion MPT device support
@@ -386,7 +389,6 @@ CONFIG_NET=y
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
-CONFIG_NETLINK_DEV=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -446,7 +448,6 @@ CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set

#
# ARCnet devices
@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y
# CONFIG_DGRS is not set
CONFIG_EEPRO100=m
CONFIG_E100=m
-# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
@@ -565,25 +565,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set

-#
-# Input I/O drivers
-#
-CONFIG_GAMEPORT=m
-CONFIG_SOUND_GAMEPORT=m
-# CONFIG_GAMEPORT_NS558 is not set
-# CONFIG_GAMEPORT_L4 is not set
-# CONFIG_GAMEPORT_EMU10K1 is not set
-# CONFIG_GAMEPORT_VORTEX is not set
-# CONFIG_GAMEPORT_FM801 is not set
-# CONFIG_GAMEPORT_CS461X is not set
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-
#
# Input Device Drivers
#
@@ -601,6 +582,24 @@ CONFIG_MOUSE_PS2=y
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set

+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+CONFIG_GAMEPORT=m
+# CONFIG_GAMEPORT_NS558 is not set
+# CONFIG_GAMEPORT_L4 is not set
+# CONFIG_GAMEPORT_EMU10K1 is not set
+# CONFIG_GAMEPORT_VORTEX is not set
+# CONFIG_GAMEPORT_FM801 is not set
+# CONFIG_GAMEPORT_CS461X is not set
+CONFIG_SOUND_GAMEPORT=m
+
#
# Character devices
#
@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_SX is not set
# CONFIG_STALDRV is not set

#
@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -670,6 +672,12 @@ CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set
CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set

#
# I2C support
@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256
#
CONFIG_VGA_CONSOLE=y
CONFIG_DUMMY_CONSOLE=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set

#
# Sound
@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y
#
# USB support
#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set

@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y

#
# USB Host Controller Drivers
@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_SL811_HCD is not set

@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set
-# CONFIG_USB_STORAGE_HP8200e is not set
+# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_MON is not set

#
# USB port drivers
@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_TEST is not set

#
@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
+
+#
+# XFS support
+#
CONFIG_XFS_FS=y
+CONFIG_XFS_EXPORT=y
# CONFIG_XFS_RT is not set
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_SECURITY is not set
@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
+CONFIG_EXPORTFS=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y
#
# Kernel hacking
#
+# CONFIG_PRINTK_TIME is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOG_BUF_SHIFT=20
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
CONFIG_CRYPTO_DES=m
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set

+ 4 - 30
arch/ia64/hp/common/sba_iommu.c

@@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus)
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	union acpi_object *obj;
-	acpi_handle phandle;
	unsigned int node;
+	int pxm;

	ioc->node = MAX_NUMNODES;

-	/*
-	 * Check for a _PXM on this node first.  We don't typically see
-	 * one here, so we'll end up getting it from the parent.
-	 */
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
-		if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
-			return;
-
-		/* Reset the acpi buffer */
-		buffer.length = ACPI_ALLOCATE_BUFFER;
-		buffer.pointer = NULL;
-
-		if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
-		                                      &buffer)))
-			return;
-	}
+	pxm = acpi_get_pxm(handle);

-	if (!buffer.length || !buffer.pointer)
+	if (pxm < 0)
		return;

-	obj = buffer.pointer;
-
-	if (obj->type != ACPI_TYPE_INTEGER ||
-	    obj->integer.value >= MAX_PXM_DOMAINS) {
-		acpi_os_free(buffer.pointer);
-		return;
-	}
-
-	node = pxm_to_nid_map[obj->integer.value];
-	acpi_os_free(buffer.pointer);
+	node = pxm_to_nid_map[pxm];

	if (node >= MAX_NUMNODES || !node_online(node))
		return;

+ 5 - 18
arch/ia64/kernel/acpi.c

@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
	union acpi_object *obj;
	struct acpi_table_iosapic *iosapic;
	unsigned int gsi_base;
-	int node;
+	int pxm, node;

	/* Only care about objects w/ a method that returns the MADT */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
	gsi_base = iosapic->global_irq_base;

	acpi_os_free(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;

	/*
-	 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
+	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
	 * us which node to associate this with.
	 */
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
-		return AE_OK;
-
-	if (!buffer.length || !buffer.pointer)
-		return AE_OK;
-
-	obj = buffer.pointer;
-
-	if (obj->type != ACPI_TYPE_INTEGER ||
-	    obj->integer.value >= MAX_PXM_DOMAINS) {
-		acpi_os_free(buffer.pointer);
+	pxm = acpi_get_pxm(handle);
+	if (pxm < 0)
		return AE_OK;
-	}

-	node = pxm_to_nid_map[obj->integer.value];
-	acpi_os_free(buffer.pointer);
+	node = pxm_to_nid_map[pxm];

	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))

+ 1 - 1
arch/ia64/kernel/entry.S

@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve_syscall)
+END(ia64_ret_from_ia32_execve)
	// fall through
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel)

+ 2 - 2
arch/ia64/kernel/mca_drv.c

@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
	spin_unlock(&mca_bh_lock);

	/* This process is about to be killed itself */
-	force_sig(SIGKILL, current);
-	schedule();
+	do_exit(SIGKILL);
}

/**
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
			psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
			psr2->cpl = 0;
			psr2->ri  = 0;
+			psr2->i  = 0;

			return 1;
		}

+ 15 - 3
arch/ia64/kernel/mca_drv_asm.S

@@ -10,6 +10,7 @@
 
 
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#include <asm/ptrace.h>

GLOBAL_ENTRY(mca_handler_bhhook)
	invala						// clear RSE ?
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
	;;
	alloc		r16=ar.pfs,0,2,1,0		// make a new frame
	;;
+	mov		ar.rsc=0
+	;;
	mov		r13=IA64_KR(CURRENT)		// current task pointer
	;;
-	adds		r12=IA64_TASK_THREAD_KSP_OFFSET,r13
+	mov		r2=r13
+	;;
+	addl		r22=IA64_RBS_OFFSET,r2
+	;;
+	mov		ar.bspstore=r22
	;;
-	ld8		r12=[r12]			// stack pointer
+	addl		sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
	;;
+	adds		r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+	st1		[r2]=r0				// clear current->thread.on_ustack flag
	mov		loc0=r16
	movl		loc1=mca_handler_bh		// recovery C function
	;;
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
	;;
	mov		loc1=rp
	;;
-	br.call.sptk.many    rp=b6			// not return ...
+	ssm		psr.i
+	;;
+	br.call.sptk.many    rp=b6			// does not return ...
	;;
	mov		ar.pfs=loc0
	mov 		rp=loc1

+ 33 - 10
arch/ia64/kernel/perfmon.c

@@ -1265,6 +1265,8 @@ out:
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

+extern void update_pal_halt_status(int);
+
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
		is_syswide,
		cpu));

+	/*
+	 * disable default_idle() to go to PAL_HALT
+	 */
+	update_pal_halt_status(0);
+
	UNLOCK_PFS(flags);

	return 0;
@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
		is_syswide,
		cpu));

+	/*
+	 * if possible, enable default_idle() to go into PAL_HALT
+	 */
+	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+		update_pal_halt_status(1);
+
	UNLOCK_PFS(flags);

	return 0;
@@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
-		return -EINVAL;
+		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4704,16 +4717,26 @@ recheck:
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
-	 * if context is UNLOADED we are safe to go
-	 */
-	if (state == PFM_CTX_UNLOADED) return 0;
-
-	/*
-	 * no command can operate on a zombie context
+	 * we are monitoring another thread
	 */
-	if (state == PFM_CTX_ZOMBIE) {
-		DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
-		return -EINVAL;
+	switch(state) {
+		case PFM_CTX_UNLOADED:
+			/*
+			 * if context is UNLOADED we are safe to go
+			 */
+			return 0;
+		case PFM_CTX_ZOMBIE:
+			/*
+			 * no command can operate on a zombie context
+			 */
+			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
+			return -EINVAL;
+		case PFM_CTX_MASKED:
+			/*
+			 * PMU state has been saved to software even though
+			 * the thread may still be running.
+			 */
+			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*

+ 36 - 19
arch/ia64/kernel/process.c

@@ -50,7 +50,7 @@
#include "sigframe.h"

void (*ia64_mark_idle)(int);
-static cpumask_t cpu_idle_map;
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
		ia64_do_signal(oldset, scr, in_syscall);
}

-static int pal_halt = 1;
+static int pal_halt        = 1;
+static int can_do_pal_halt = 1;
+
static int __init nohalt_setup(char * str)
{
	pal_halt = 0;
@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
}
__setup("nohalt", nohalt_setup);

+void
+update_pal_halt_status(int status)
+{
+	can_do_pal_halt = pal_halt && status;
+}
+
/*
 * We use this if we don't have any better idle routine..
 */
void
default_idle (void)
{
-	unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
-
	while (!need_resched())
-		if (pal_halt && !pmu_active)
+		if (can_do_pal_halt)
			safe_halt();
		else
			cpu_relax();
@@ -223,20 +229,31 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */

-
void cpu_idle_wait(void)
{
-        int cpu;
-        cpumask_t map;
+	unsigned int cpu, this_cpu = get_cpu();
+	cpumask_t map;

-        for_each_online_cpu(cpu)
-                cpu_set(cpu, cpu_idle_map);
+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+	put_cpu();

-        wmb();
-        do {
-                ssleep(1);
-                cpus_and(map, cpu_idle_map, cpu_online_map);
-        } while (!cpus_empty(map));
+	cpus_clear(map);
+	for_each_online_cpu(cpu) {
+		per_cpu(cpu_idle_state, cpu) = 1;
+		cpu_set(cpu, map);
+	}
+
+	__get_cpu_var(cpu_idle_state) = 0;
+
+	wmb();
+	do {
+		ssleep(1);
+		for_each_online_cpu(cpu) {
+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+				cpu_clear(cpu, map);
+		}
+		cpus_and(map, map, cpu_online_map);
+	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

@@ -244,7 +261,6 @@ void __attribute__((noreturn))
cpu_idle (void)
{
	void (*mark_idle)(int) = ia64_mark_idle;
-	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
@@ -255,12 +271,13 @@ cpu_idle (void)
		while (!need_resched()) {
			void (*idle)(void);

+			if (__get_cpu_var(cpu_idle_state))
+				__get_cpu_var(cpu_idle_state) = 0;
+
+			rmb();
			if (mark_idle)
				(*mark_idle)(1);

-			if (cpu_isset(cpu, cpu_idle_map))
-				cpu_clear(cpu, cpu_idle_map);
-			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;

+ 1 - 1
arch/ia64/kernel/signal.c

@@ -224,7 +224,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
-	if (test_thread_flag(TIF_SYSCALL_TRACE) 
+	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    || test_thread_flag(TIF_SYSCALL_AUDIT))
		/*
		 * strace expects to be notified after sigreturn returns even though the

+ 3 - 3
arch/ia64/lib/flush.S

@@ -1,8 +1,8 @@
/*
 * Cache flushing routines.
 *
- * Copyright (C) 1999-2001 Hewlett-Packard Co
- * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/asmmacro.h>
#include <asm/page.h>
@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range)
 
 
	mov ar.lc=r8
	;;
-.Loop:	fc in0				// issuable on M0 only
+.Loop:	fc.i in0			// issuable on M2 only
	add in0=32,in0
	br.cloop.sptk.few .Loop
	;;

+ 1 - 1
arch/ia64/lib/memcpy_mck.S

@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
	mov	f6=f0
	br.cond.sptk .common_code
	;;
+END(memcpy)
GLOBAL_ENTRY(__copy_user)
	.prologue
// check dest alignment
@@ -524,7 +525,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
#undef B
#undef C
#undef D
-END(memcpy)

/*
 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which

+ 1 - 1
arch/ia64/lib/memset.S

@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset)
{ .mmi
	.prologue
	alloc	tmp = ar.pfs, 3, 0, 0, 0
-	.body
	lfetch.nt1 [dest]			//
	.save   ar.lc, save_lc
	mov.i	save_lc = ar.lc
+	.body
} { .mmi
	mov	ret0 = dest			// return value
	cmp.ne	p_nz, p_zr = value, r0		// use stf.spill if value is zero

+ 6 - 1
arch/ia64/sn/kernel/Makefile

@@ -4,10 +4,15 @@
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc.  All Rights Reserved.
#

obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
				   huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC)      += machvec.o
obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
+obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o
+xp-y				:= xp_main.o xp_nofault.o
+obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpc.o
+xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
+obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpnet.o

+ 6 - 4
arch/ia64/sn/kernel/io_init.c

@@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void)
		if (status)
			continue;

+		/* Attach the error interrupt handlers */
+		if (nasid & 1)
+			ice_error_init(hubdev);
+		else
+			hub_error_init(hubdev);
+
		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
			hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;

@@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void)
			    sn_flush_device_list;
		}

-		if (!(i & 1))
-			hub_error_init(hubdev);
-		else
-			ice_error_init(hubdev);
	}

}

+ 21 - 13
arch/ia64/sn/kernel/mca.c

@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
 * This function is the callback routine that SAL calls to log error
 * info for platform errors.  buf is appended to sn_oemdata, resizing as
 * required.
+ * Note: this is a SAL to OS callback, running under the same rules as the SAL
+ * code.  SAL calls are run with preempt disabled so this routine must not
+ * sleep.  vmalloc can sleep so print_hook cannot resize the output buffer
+ * itself, instead it must set the required size and return to let the caller
+ * resize the buffer then redrive the SAL call.
 */
static int print_hook(const char *fmt, ...)
{
@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...)
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	len = strlen(buf);
-	while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
-		u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
-		if (!newbuf) {
-			printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
-			       __FUNCTION__);
-			return 0;
-		}
-		memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
-		vfree(*sn_oemdata);
-		*sn_oemdata = newbuf;
-	}
-	memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
+	if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
+		memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
	*sn_oemdata_size += len;
	return 0;
}
@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
	sn_oemdata = oemdata;
	sn_oemdata_size = oemdata_size;
	sn_oemdata_bufsize = 0;
-	ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
+	*sn_oemdata_size = PAGE_SIZE;	/* first guess at how much data will be generated */
+	while (*sn_oemdata_size > sn_oemdata_bufsize) {
+		u8 *newbuf = vmalloc(*sn_oemdata_size);
+		if (!newbuf) {
+			printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
+			       __FUNCTION__);
+			return 1;
+		}
+		vfree(*sn_oemdata);
+		*sn_oemdata = newbuf;
+		sn_oemdata_bufsize = *sn_oemdata_size;
+		*sn_oemdata_size = 0;
+		ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
+	}
	up(&sn_oemdata_mutex);
	return 0;
}
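
The size-then-redrive convention that the new comment spells out can be distilled into a few lines; the names below are illustrative, not the kernel's:

```c
#include <stddef.h>
#include <string.h>

/* Illustrative sketch only.  On the first pass the buffer is too
 * small (bufsize == 0), so emit() merely accumulates the size that
 * would be needed.  The caller then allocates `needed' bytes and
 * redrives the whole operation; on that pass every chunk fits. */
static size_t needed, bufsize;
static char *outbuf;

static void emit(const char *chunk)
{
	size_t len = strlen(chunk);

	if (needed + len <= bufsize)
		memcpy(outbuf + needed, chunk, len);
	needed += len;	/* always account for the chunk */
}
```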

+ 24 - 16
arch/ia64/sn/kernel/setup.c

@@ -3,7 +3,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/config.h>
@@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);

+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
+EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
+
+DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
+
partid_t sn_partid = -1;
EXPORT_SYMBOL(sn_partid);
char sn_system_serial_number_string[128];
@@ -373,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p)
{
	cnodeid_t cnode;

-	memset(pda->cnodeid_to_nasid_table, -1,
-	       sizeof(pda->cnodeid_to_nasid_table));
+	memset(sn_cnodeid_to_nasid, -1,
+			sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	for_each_online_node(cnode)
-		pda->cnodeid_to_nasid_table[cnode] =
-		    pxm_to_nasid(nid_to_pxm_map[cnode]);
+		sn_cnodeid_to_nasid[cnode] =
+				pxm_to_nasid(nid_to_pxm_map[cnode]);

	numionodes = num_online_nodes();
	scan_for_ionodes();
@@ -477,7 +483,8 @@ void __init sn_cpu_init(void)
 
 
	cnode = nasid_to_cnodeid(nasid);

-	pda->p_nodepda = nodepdaindr[cnode];
+	sn_nodepda = nodepdaindr[cnode];
+
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
@@ -486,15 +493,18 @@ void __init sn_cpu_init(void)
 	pda->idle_flag = 0;
 
 	if (cpuid != 0) {
-		memcpy(pda->cnodeid_to_nasid_table,
-		       pdacpu(0)->cnodeid_to_nasid_table,
-		       sizeof(pda->cnodeid_to_nasid_table));
+		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
+		memcpy(sn_cnodeid_to_nasid,
+		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
+		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
 	}
 
 	/*
 	 * Check for WARs.
 	 * Only needs to be done once, on BSP.
-	 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
+	 * Has to be done after loop above, because it uses this cpu's
+	 * sn_cnodeid_to_nasid table which was just initialized if this
+	 * isn't cpu 0.
 	 * Has to be done before assignment below.
 	 */
 	if (!wars_have_been_checked) {
@@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void)
 		brd = find_lboard_any(brd, KLTYPE_SNIA);
 
 		while (brd) {
-			pda->cnodeid_to_nasid_table[numionodes] =
-			    brd->brd_nasid;
+			sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
 			physical_node_map[brd->brd_nasid] = numionodes;
 			root_lboard[numionodes] = brd;
 			numionodes++;
@@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void)
 				      root_lboard[nasid_to_cnodeid(nasid)],
 				      KLTYPE_TIO);
 		while (brd) {
-			pda->cnodeid_to_nasid_table[numionodes] =
-			    brd->brd_nasid;
+			sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
 			physical_node_map[brd->brd_nasid] = numionodes;
 			root_lboard[numionodes] = brd;
 			numionodes++;
@@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void)
 			brd = find_lboard_any(brd, KLTYPE_TIO);
 		}
 	}
-
 }
 
 int
@@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice)
 	long cpu;
 	
 	for (cpu=0; cpu < NR_CPUS; cpu++) 
-		if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
+		if (cpuid_to_nasid(cpu) == nasid &&
+					cpuid_to_slice(cpu) == slice)
 			return cpu;
 
 	return -1;

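The setup.c hunks move the cnodeid-to-nasid table out of the per-processor pda and into a dedicated per-CPU array, so lookups go through a wrapper instead of a pda field. A sketch of the likely shape of that wrapper, assuming sn_cnodeid_to_nasid is a macro over the new per-CPU variable (its real definition lives in a header outside this diff):

/* hypothetical reconstruction of the accessor used by the hunks above */
#define sn_cnodeid_to_nasid	(&__ia64_per_cpu_var(__sn_cnodeid_to_nasid)[0])

static inline short sketch_cnodeid_to_nasid(int cnodeid)
{
	/* per-CPU array indexed by compact node id; no pda dereference */
	return sn_cnodeid_to_nasid[cnodeid];
}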
+ 32 - 28
arch/ia64/sn/kernel/tiocx.c

@@ -21,6 +21,8 @@
 #include <asm/sn/types.h>
 #include <asm/sn/shubio.h>
 #include <asm/sn/tiocx.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
 #include "tio.h"
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
@@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
 	}
 }
 
-uint64_t
-tiocx_dma_addr(uint64_t addr)
+uint64_t tiocx_dma_addr(uint64_t addr)
 {
 	return PHYS_TO_TIODMA(addr);
 }
 
-uint64_t
-tiocx_swin_base(int nasid)
+uint64_t tiocx_swin_base(int nasid)
 {
 	return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
 }
@@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type);
 EXPORT_SYMBOL(tiocx_dma_addr);
 EXPORT_SYMBOL(tiocx_swin_base);
 
-static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
-{
-
-	struct ia64_sal_retval ret_stuff;
-	ret_stuff.status = 0;
-	ret_stuff.v0 = 0;
-
-	ia64_sal_oemcall_nolock(&ret_stuff,
-				SN_SAL_IOIF_GET_HUBDEV_INFO,
-				handle, address, 0, 0, 0, 0, 0);
-	return ret_stuff.v0;
-}
-
 static void tio_conveyor_set(nasid_t nasid, int enable_flag)
 {
 	uint64_t ice_frz;
@@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
 	udelay(2000);
 }
 
-static int fpga_attached(nasid_t nasid)
+static int tiocx_btchar_get(int nasid)
+{
+	moduleid_t module_id;
+	geoid_t geoid;
+	int cnodeid;
+
+	cnodeid = nasid_to_cnodeid(nasid);
+	geoid = cnodeid_get_geoid(cnodeid);
+	module_id = geo_module(geoid);
+	return MODULE_GET_BTCHAR(module_id);
+}
+
+static int is_fpga_brick(int nasid)
+{
+	switch (tiocx_btchar_get(nasid)) {
+	case L1_BRICKTYPE_SA:
+	case L1_BRICKTYPE_ATHENA:
+		return 1;
+	}
+	return 0;
+}
+
+static int bitstream_loaded(nasid_t nasid)
 {
 	uint64_t cx_credits;
 
@@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
 	int mfg_num = CX_DEV_NONE;
 	nasid_t nasid = cx_dev->cx_id.nasid;
 
-	if (fpga_attached(nasid)) {
+	if (bitstream_loaded(nasid)) {
 		uint64_t cx_id;
 
 		cx_id =
@@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf)
 {
 	struct cx_dev *cx_dev = to_cx_dev(dev);
 
-	return sprintf(buf, "0x%x 0x%x 0x%x\n",
+	return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
 		       cx_dev->cx_id.nasid,
-		       cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
+		       cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
+		       tiocx_btchar_get(cx_dev->cx_id.nasid));
 }
 
 static ssize_t store_cxdev_control(struct device *dev, const char *buf,
@@ -475,20 +485,14 @@ static int __init tiocx_init(void)
 		if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
 			break;	/* No more nasids .. bail out of loop */
 
-		if (nasid & 0x1) {	/* TIO's are always odd */
+		if ((nasid & 0x1) && is_fpga_brick(nasid)) {
 			struct hubdev_info *hubdev;
-			uint64_t status;
 			struct xwidget_info *widgetp;
 
 			DBG("Found TIO at nasid 0x%x\n", nasid);
 
 			hubdev =
 			    (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
-			status =
-			    tiocx_get_hubdev_info(nasid,
-						  (uint64_t) __pa(hubdev));
-			if (status)
-				continue;
 
 			widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
 

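With the tiocx.c hunks, tiocx_init() no longer issues a SAL call per TIO to decide whether an FPGA is present; it gates purely on the nasid being odd and the L1 brick type naming an FPGA-bearing brick. The combined test, folded into one hypothetical helper built from the functions this diff adds:

/* sketch only; tiocx_init() open-codes this pair of checks */
static int tiocx_should_probe(nasid_t nasid)
{
	if (!(nasid & 0x1))	/* TIOs are always odd nasids */
		return 0;
	switch (tiocx_btchar_get(nasid)) {	/* L1 brick type */
	case L1_BRICKTYPE_SA:
	case L1_BRICKTYPE_ATHENA:
		return 1;
	}
	return 0;
}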
+ 289 - 0
arch/ia64/sn/kernel/xp_main.c

@@ -0,0 +1,289 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition (XP) base.
+ *
+ *	XP provides a base from which its users can interact
+ *	with XPC, yet not be dependent on XPC.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/xp.h>
+
+
+/*
+ * Target of nofault PIO read.
+ */
+u64 xp_nofault_PIOR_target;
+
+
+/*
+ * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
+ * users of XPC.
+ */
+struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+
+
+/*
+ * Initialize the XPC interface to indicate that XPC isn't loaded.
+ */
+static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+
+struct xpc_interface xpc_interface = {
+	(void (*)(int)) xpc_notloaded,
+	(void (*)(int)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
+							xpc_notloaded,
+	(void (*)(partid_t, int, void *)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+};
+
+
+/*
+ * XPC calls this when it (the XPC module) has been loaded.
+ */
+void
+xpc_set_interface(void (*connect)(int),
+		void (*disconnect)(int),
+		enum xpc_retval (*allocate)(partid_t, int, u32, void **),
+		enum xpc_retval (*send)(partid_t, int, void *),
+		enum xpc_retval (*send_notify)(partid_t, int, void *,
+						xpc_notify_func, void *),
+		void (*received)(partid_t, int, void *),
+		enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+{
+	xpc_interface.connect = connect;
+	xpc_interface.disconnect = disconnect;
+	xpc_interface.allocate = allocate;
+	xpc_interface.send = send;
+	xpc_interface.send_notify = send_notify;
+	xpc_interface.received = received;
+	xpc_interface.partid_to_nasids = partid_to_nasids;
+}
+
+
+/*
+ * XPC calls this when it (the XPC module) is being unloaded.
+ */
+void
+xpc_clear_interface(void)
+{
+	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
+					void **)) xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
+					xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
+				    xpc_notify_func, void *)) xpc_notloaded;
+	xpc_interface.received = (void (*)(partid_t, int, void *))
+					xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
+					xpc_notloaded;
+}
+
+
+/*
+ * Register for automatic establishment of a channel connection whenever
+ * a partition comes up.
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to register for connection.
+ *	func - function to call for asynchronous notification of channel
+ *	       state changes (i.e., connection, disconnection, error) and
+ *	       the arrival of incoming messages.
+ *      key - pointer to optional user-defined value that gets passed back
+ *	      to the user on any callouts made to func.
+ *	payload_size - size in bytes of the XPC message's payload area which
+ *		       contains a user-defined message. The user should make
+ *		       this large enough to hold their largest message.
+ *	nentries - max #of XPC message entries a message queue can contain.
+ *		   The actual number, which is determined when a connection
+ * 		   is established and may be less than requested, will be
+ *		   passed to the user via the xpcConnected callout.
+ *	assigned_limit - max number of kthreads allowed to be processing
+ * 			 messages (per connection) at any given instant.
+ *	idle_limit - max number of kthreads allowed to be idle at any given
+ * 		     instant.
+ */
+enum xpc_retval
+xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
+		u16 nentries, u32 assigned_limit, u32 idle_limit)
+{
+	struct xpc_registration *registration;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(payload_size == 0 || nentries == 0);
+	DBUG_ON(func == NULL);
+	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
+
+	registration = &xpc_registrations[ch_number];
+
+	if (down_interruptible(&registration->sema) != 0) {
+		return xpcInterrupted;
+	}
+
+	/* if XPC_CHANNEL_REGISTERED(ch_number) */
+	if (registration->func != NULL) {
+		up(&registration->sema);
+		return xpcAlreadyRegistered;
+	}
+
+	/* register the channel for connection */
+	registration->msg_size = XPC_MSG_SIZE(payload_size);
+	registration->nentries = nentries;
+	registration->assigned_limit = assigned_limit;
+	registration->idle_limit = idle_limit;
+	registration->key = key;
+	registration->func = func;
+
+	up(&registration->sema);
+
+	xpc_interface.connect(ch_number);
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Remove the registration for automatic connection of the specified channel
+ * when a partition comes up.
+ *
+ * Before returning, xpc_disconnect() will wait until all connections on the
+ * specified channel have been closed/torn down. So the caller can be assured
+ * that they will not be receiving any more callouts from XPC to their
+ * function registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to unregister.
+ */
+void
+xpc_disconnect(int ch_number)
+{
+	struct xpc_registration *registration;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+
+	registration = &xpc_registrations[ch_number];
+
+	/*
+	 * We've decided not to make this a down_interruptible(), since we
+	 * figured XPC's users will just turn around and call xpc_disconnect()
+ * again anyway, so we might as well wait, if need be.
+	 */
+	down(&registration->sema);
+
+	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
+	if (registration->func == NULL) {
+		up(&registration->sema);
+		return;
+	}
+
+	/* remove the connection registration for the specified channel */
+	registration->func = NULL;
+	registration->key = NULL;
+	registration->nentries = 0;
+	registration->msg_size = 0;
+	registration->assigned_limit = 0;
+	registration->idle_limit = 0;
+
+	xpc_interface.disconnect(ch_number);
+
+	up(&registration->sema);
+
+	return;
+}
+
+
+int __init
+xp_init(void)
+{
+	int ret, ch_number;
+	u64 func_addr = *(u64 *) xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+
+
+	if (!ia64_platform_is("sn2")) {
+		return -ENODEV;
+	}
+
+	/*
+	 * Register a nofault code region which performs a cross-partition
+	 * PIO read. If the PIO read times out, the MCA handler will consume
+	 * the error and return to a kernel-provided instruction to indicate
+ * an error. This PIO read exists because it is guaranteed to time out
+	 * if the destination is down (AMO operations do not timeout on at
+	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
+	 * work around).
+	 */
+	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
+						err_func_addr, 1, 1)) != 0) {
+		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
+			ret);
+	}
+	/*
+	 * Setup the nofault PIO read target. (There is no special reason why
+	 * SH_IPI_ACCESS was selected.)
+	 */
+	if (is_shub2()) {
+		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+	} else {
+		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+	}
+
+	/* initialize the connection registration semaphores */
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+		sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+	}
+
+	return 0;
+}
+module_init(xp_init);
+
+
+void __exit
+xp_exit(void)
+{
+	u64 func_addr = *(u64 *) xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+
+
+	/* unregister the PIO read nofault code region */
+	(void) sn_register_nofault_code(func_addr, err_func_addr,
+					err_func_addr, 1, 0);
+}
+module_exit(xp_exit);
+
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition (XP) base");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(xp_nofault_PIOR);
+EXPORT_SYMBOL(xp_nofault_PIOR_target);
+EXPORT_SYMBOL(xpc_registrations);
+EXPORT_SYMBOL(xpc_interface);
+EXPORT_SYMBOL(xpc_clear_interface);
+EXPORT_SYMBOL(xpc_set_interface);
+EXPORT_SYMBOL(xpc_connect);
+EXPORT_SYMBOL(xpc_disconnect);
+

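xp_main.c is the whole client-facing surface of XP: register a channel with xpc_connect() and remove the registration with xpc_disconnect(). A hedged sketch of a kernel-level client (the channel number, queue sizing, and callback body are illustrative; the xpc_channel_func typedef, xpc_received() helper, and xpcMsgReceived reason code are assumed from xp.h, which is outside this diff):

#include <linux/module.h>
#include <asm/sn/xp.h>

#define MY_CHANNEL	3	/* illustrative channel number */

static void my_channel_func(enum xpc_retval reason, partid_t partid,
			    int ch_number, void *data, void *key)
{
	/* called on connect/disconnect/error and on message arrival */
	if (reason == xpcMsgReceived)
		xpc_received(partid, ch_number, data);	/* release the msg */
}

static int __init my_init(void)
{
	/* 128-byte payloads, 64-entry queues, <=2 kthreads, <=1 idle */
	if (xpc_connect(MY_CHANNEL, my_channel_func, NULL,
			128, 64, 2, 1) != xpcSuccess)
		return -EBUSY;
	return 0;
}

static void __exit my_exit(void)
{
	xpc_disconnect(MY_CHANNEL);	/* waits for connections to close */
}

module_init(my_init);
module_exit(my_exit);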
+ 31 - 0
arch/ia64/sn/kernel/xp_nofault.S

@@ -0,0 +1,31 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * The xp_nofault_PIOR function takes a pointer to a remote PIO register
+ * and attempts to load and consume a value from it.  This function
+ * will be registered as a nofault code block.  In the event that the
+ * PIO read fails, the MCA handler will force the error to look
+ * corrected and vector to the xp_error_PIOR which will return an error.
+ *
+ *	extern int xp_nofault_PIOR(void *remote_register);
+ */
+
+	.global xp_nofault_PIOR
+xp_nofault_PIOR:
+	mov	r8=r0			// Stage a success return value
+	ld8.acq	r9=[r32];;		// PIO Read the specified register
+	adds	r9=1,r9			// Add to force a consume
+	br.ret.sptk.many b0;;		// Return success
+
+	.global xp_error_PIOR
+xp_error_PIOR:
+	mov	r8=1			// Return value of 1
+	br.ret.sptk.many b0;;		// Return failure
+

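The stub above is called like any C function; a nonzero return means the MCA handler consumed a failed PIO read and vectored to xp_error_PIOR. A sketch of the calling idiom, the same one the xpc.h hunk below uses in xpc_IPI_send() (assuming xp.h carries the extern declaration shown in the comment above):

#include <asm/sn/xp.h>	/* enum xpc_retval; declares xp_nofault_PIOR() */

static enum xpc_retval probe_remote(u64 *pio_addr)
{
	/* the read may machine-check if the remote partition is down;
	 * the nofault registration turns that into a return of 1 */
	return (xp_nofault_PIOR(pio_addr) == 0) ? xpcSuccess
						: xpcPioReadError;
}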
+ 991 - 0
arch/ia64/sn/kernel/xpc.h

@@ -0,0 +1,991 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition Communication (XPC) structures and macros.
+ */
+
+#ifndef _IA64_SN_KERNEL_XPC_H
+#define _IA64_SN_KERNEL_XPC_H
+
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/device.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/xp.h>
+
+
+/*
+ * XPC Version numbers consist of a major and minor number. XPC can always
+ * talk to versions with same major #, and never talk to versions with a
+ * different major #.
+ */
+#define _XPC_VERSION(_maj, _min)	(((_maj) << 4) | ((_min) & 0xf))
+#define XPC_VERSION_MAJOR(_v)		((_v) >> 4)
+#define XPC_VERSION_MINOR(_v)		((_v) & 0xf)
+
+
+/*
+ * The next macros define word or bit representations for given
+ * C-brick nasid in either the SAL provided bit array representing
+ * nasids in the partition/machine or the AMO_t array used for
+ * inter-partition initiation communications.
+ *
+ * For SN2 machines, C-Bricks are always even-numbered NASIDs.  As
+ * such, some space will be saved by insisting that nasid information
+ * passed from SAL always be packed for C-Bricks and the
+ * cross-partition interrupts use the same packing scheme.
+ */
+#define XPC_NASID_W_INDEX(_n)	(((_n) / 64) / 2)
+#define XPC_NASID_B_INDEX(_n)	(((_n) / 2) & (64 - 1))
+#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
+				    (1UL << XPC_NASID_B_INDEX(_n)))
+#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
+
+#define XPC_HB_DEFAULT_INTERVAL		5	/* incr HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_TIMEOUT	20	/* check HB every x secs */
+
+/* define the process name of HB checker and the CPU it is pinned to */
+#define XPC_HB_CHECK_THREAD_NAME	"xpc_hb"
+#define XPC_HB_CHECK_CPU		0
+
+/* define the process name of the discovery thread */
+#define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
+
+
+#define XPC_HB_ALLOWED(_p, _v)	((_v)->heartbeating_to_mask & (1UL << (_p)))
+#define XPC_ALLOW_HB(_p, _v)	(_v)->heartbeating_to_mask |= (1UL << (_p))
+#define XPC_DISALLOW_HB(_p, _v)	(_v)->heartbeating_to_mask &= (~(1UL << (_p)))
+
+
+/*
+ * Reserved Page provided by SAL.
+ *
+ * SAL provides one page per partition of reserved memory.  When SAL
+ * initialization is complete, SAL_signature, SAL_version, partid,
+ * part_nasids, and mach_nasids are set.
+ *
+ * Note: Until vars_pa is set, the partition XPC code has not been initialized.
+ */
+struct xpc_rsvd_page {
+	u64 SAL_signature;	/* SAL unique signature */
+	u64 SAL_version;	/* SAL specified version */
+	u8 partid;		/* partition ID from SAL */
+	u8 version;
+	u8 pad[6];		/* pad to u64 align */
+	u64 vars_pa;
+	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
+	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
+};
+#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
+
+#define XPC_RSVD_PAGE_ALIGNED_SIZE \
+			(L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
+
+
+/*
+ * Define the structures by which XPC variables can be exported to other
+ * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
+ */
+
+/*
+ * The following structure describes the partition generic variables
+ * needed by other partitions in order to properly initialize.
+ *
+ * struct xpc_vars version number also applies to struct xpc_vars_part.
+ * Changes to either structure and/or related functionality should be
+ * reflected by incrementing either the major or minor version numbers
+ * of struct xpc_vars.
+ */
+struct xpc_vars {
+	u8 version;
+	u64 heartbeat;
+	u64 heartbeating_to_mask;
+	u64 kdb_status;		/* 0 = machine running */
+	int act_nasid;
+	int act_phys_cpuid;
+	u64 vars_part_pa;
+	u64 amos_page_pa;	/* paddr of page of AMOs from MSPEC driver */
+	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
+	AMO_t *act_amos;	/* pointer to the first activation AMO */
+};
+#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
+
+#define XPC_VARS_ALIGNED_SIZE  (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
+
+/*
+ * The following structure describes the per partition specific variables.
+ *
+ * An array of these structures, one per partition, will be defined. As a
+ * partition becomes active XPC will copy the array entry corresponding to
+ * itself from that partition. It is desirable that the size of this
+ * structure evenly divide into a cacheline, such that none of the entries
+ * in this array crosses a cacheline boundary. As it is now, each entry
+ * occupies half a cacheline.
+ */
+struct xpc_vars_part {
+	u64 magic;
+
+	u64 openclose_args_pa;	/* physical address of open and close args */
+	u64 GPs_pa;		/* physical address of Get/Put values */
+
+	u64 IPI_amo_pa;		/* physical address of IPI AMO_t structure */
+	int IPI_nasid;		/* nasid of where to send IPIs */
+	int IPI_phys_cpuid;	/* physical CPU ID of where to send IPIs */
+
+	u8 nchannels;		/* #of defined channels supported */
+
+	u8 reserved[23];	/* pad to a full 64 bytes */
+};
+
+/*
+ * The vars_part MAGIC numbers play a part in the first contact protocol.
+ *
+ * MAGIC1 indicates that the per partition specific variables for a remote
+ * partition have been initialized by this partition.
+ *
+ * MAGIC2 indicates that this partition has pulled the remote partition's
+ * per partition variables that pertain to this partition.
+ */
+#define XPC_VP_MAGIC1	0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2	0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
+
+
+
+/*
+ * Functions registered by add_timer() or called by kernel_thread() only
+ * allow for a single 64-bit argument. The following macros can be used to
+ * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
+ * the passed argument.
+ */
+#define XPC_PACK_ARGS(_arg1, _arg2) \
+			((((u64) _arg1) & 0xffffffff) | \
+			((((u64) _arg2) & 0xffffffff) << 32))
+
+#define XPC_UNPACK_ARG1(_args)	(((u64) _args) & 0xffffffff)
+#define XPC_UNPACK_ARG2(_args)	((((u64) _args) >> 32) & 0xffffffff)
+
+
+
+/*
+ * Define a Get/Put value pair (pointers) used with a message queue.
+ */
+struct xpc_gp {
+	s64 get;	/* Get value */
+	s64 put;	/* Put value */
+};
+
+#define XPC_GP_SIZE \
+		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
+
+
+
+/*
+ * Define a structure that contains arguments associated with opening and
+ * closing a channel.
+ */
+struct xpc_openclose_args {
+	u16 reason;		/* reason why channel is closing */
+	u16 msg_size;		/* sizeof each message entry */
+	u16 remote_nentries;	/* #of message entries in remote msg queue */
+	u16 local_nentries;	/* #of message entries in local msg queue */
+	u64 local_msgqueue_pa;	/* physical address of local message queue */
+};
+
+#define XPC_OPENCLOSE_ARGS_SIZE \
+	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
+
+
+
+/* struct xpc_msg flags */
+
+#define	XPC_M_DONE		0x01	/* msg has been received/consumed */
+#define	XPC_M_READY		0x02	/* msg is ready to be sent */
+#define	XPC_M_INTERRUPT		0x04	/* send interrupt when msg consumed */
+
+
+#define XPC_MSG_ADDRESS(_payload) \
+		((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
+
+
+
+/*
+ * Defines notify entry.
+ *
+ * This is used to notify a message's sender that their message was received
+ * and consumed by the intended recipient.
+ */
+struct xpc_notify {
+	struct semaphore sema;		/* notify semaphore */
+	u8 type;			/* type of notification */
+
+	/* the following two fields are only used if type == XPC_N_CALL */
+	xpc_notify_func func;		/* user's notify function */
+	void *key;			/* pointer to user's key */
+};
+
+/* struct xpc_notify type of notification */
+
+#define	XPC_N_CALL		0x01	/* notify function provided by user */
+
+
+
+/*
+ * Define the structure that manages all the stuff required by a channel. In
+ * particular, they are used to manage the messages sent across the channel.
+ *
+ * This structure is private to a partition, and is NOT shared across the
+ * partition boundary.
+ *
+ * There is an array of these structures for each remote partition. It is
+ * allocated at the time a partition becomes active. The array contains one
+ * of these structures for each potential channel connection to that partition.
+ *
+ * Each of these structures manages two message queues (circular buffers).
+ * They are allocated at the time a channel connection is made. One of
+ * these message queues (local_msgqueue) holds the locally created messages
+ * that are destined for the remote partition. The other of these message
+ * queues (remote_msgqueue) is a locally cached copy of the remote partition's
+ * own local_msgqueue.
+ *
+ * The following is a description of the Get/Put pointers used to manage these
+ * two message queues. Consider the local_msgqueue to be on one partition
+ * and the remote_msgqueue to be its cached copy on another partition. A
+ * description of what each of the lettered areas contains is included.
+ *
+ *
+ *                     local_msgqueue      remote_msgqueue
+ *
+ *                        |/////////|      |/////////|
+ *    w_remote_GP.get --> +---------+      |/////////|
+ *                        |    F    |      |/////////|
+ *     remote_GP.get  --> +---------+      +---------+ <-- local_GP->get
+ *                        |         |      |         |
+ *                        |         |      |    E    |
+ *                        |         |      |         |
+ *                        |         |      +---------+ <-- w_local_GP.get
+ *                        |    B    |      |/////////|
+ *                        |         |      |////D////|
+ *                        |         |      |/////////|
+ *                        |         |      +---------+ <-- w_remote_GP.put
+ *                        |         |      |////C////|
+ *      local_GP->put --> +---------+      +---------+ <-- remote_GP.put
+ *                        |         |      |/////////|
+ *                        |    A    |      |/////////|
+ *                        |         |      |/////////|
+ *     w_local_GP.put --> +---------+      |/////////|
+ *                        |/////////|      |/////////|
+ *
+ *
+ *	    ( remote_GP.[get|put] are cached copies of the remote
+ *	      partition's local_GP->[get|put], and thus their values can
+ *	      lag behind their counterparts on the remote partition. )
+ *
+ *
+ *  A - Messages that have been allocated, but have not yet been sent to the
+ *	remote partition.
+ *
+ *  B - Messages that have been sent, but have not yet been acknowledged by the
+ *      remote partition as having been received.
+ *
+ *  C - Area that needs to be prepared for the copying of sent messages, by
+ *	the clearing of the message flags of any previously received messages.
+ *
+ *  D - Area into which sent messages are to be copied from the remote
+ *	partition's local_msgqueue and then delivered to their intended
+ *	recipients. [ To allow for a multi-message copy, another pointer
+ *	(next_msg_to_pull) has been added to keep track of the next message
+ *	number needing to be copied (pulled). It chases after w_remote_GP.put.
+ *	Any messages lying between w_local_GP.get and next_msg_to_pull have
+ *	been copied and are ready to be delivered. ]
+ *
+ *  E - Messages that have been copied and delivered, but have not yet been
+ *	acknowledged by the recipient as having been received.
+ *
+ *  F - Messages that have been acknowledged, but XPC has not yet notified the
+ *	sender that the message was received by its intended recipient.
+ *	This is also an area that needs to be prepared for the allocating of
+ *	new messages, by the clearing of the message flags of the acknowledged
+ *	messages.
+ */
+struct xpc_channel {
+	partid_t partid;		/* ID of remote partition connected */
+	spinlock_t lock;		/* lock for updating this structure */
+	u32 flags;			/* general flags */
+
+	enum xpc_retval reason;		/* reason why channel is disconnect'g */
+	int reason_line;		/* line# disconnect initiated from */
+
+	u16 number;			/* channel # */
+
+	u16 msg_size;			/* sizeof each msg entry */
+	u16 local_nentries;		/* #of msg entries in local msg queue */
+	u16 remote_nentries;		/* #of msg entries in remote msg queue*/
+
+	void *local_msgqueue_base;	/* base address of kmalloc'd space */
+	struct xpc_msg *local_msgqueue;	/* local message queue */
+	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
+	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
+					/* local message queue */
+	u64 remote_msgqueue_pa;		/* phys addr of remote partition's */
+					/* local message queue */
+
+	atomic_t references;		/* #of external references to queues */
+
+	atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
+	wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+
+	/* queue of msg senders who want to be notified when msg received */
+
+	atomic_t n_to_notify;		/* #of msg senders to notify */
+	struct xpc_notify *notify_queue;/* notify queue for messages sent */
+
+	xpc_channel_func func;		/* user's channel function */
+	void *key;			/* pointer to user's key */
+
+	struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
+	struct semaphore teardown_sema;    /* wait for teardown completion */
+
+	struct xpc_openclose_args *local_openclose_args; /* args passed on */
+					/* opening or closing of channel */
+
+	/* various flavors of local and remote Get/Put values */
+
+	struct xpc_gp *local_GP;	/* local Get/Put values */
+	struct xpc_gp remote_GP;	/* remote Get/Put values */
+	struct xpc_gp w_local_GP;	/* working local Get/Put values */
+	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
+	s64 next_msg_to_pull;		/* Put value of next msg to pull */
+
+	/* kthread management related fields */
+
+// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
+// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
+// >>> dependent on activity over the last interval of time
+	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
+	u32 kthreads_assigned_limit; 	/* limit on #of kthreads assigned */
+	atomic_t kthreads_idle;		/* #of kthreads idle waiting for work */
+	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
+	atomic_t kthreads_active;	/* #of kthreads actively working */
+	// >>> following field is temporary
+	u32 kthreads_created;		/* total #of kthreads created */
+
+	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
+
+} ____cacheline_aligned;
+
+
+/* struct xpc_channel flags */
+
+#define	XPC_C_WASCONNECTED	0x00000001 /* channel was connected */
+
+#define	XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
+#define	XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
+#define	XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
+#define	XPC_C_OPENREQUEST	0x00000010 /* local open channel request */
+
+#define	XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
+#define	XPC_C_CONNECTCALLOUT	0x00000040 /* channel connected callout made */
+#define	XPC_C_CONNECTED		0x00000080 /* local channel is connected */
+#define	XPC_C_CONNECTING	0x00000100 /* channel is being connected */
+
+#define	XPC_C_RCLOSEREPLY	0x00000200 /* remote close channel reply */
+#define	XPC_C_CLOSEREPLY	0x00000400 /* local close channel reply */
+#define	XPC_C_RCLOSEREQUEST	0x00000800 /* remote close channel request */
+#define	XPC_C_CLOSEREQUEST	0x00001000 /* local close channel request */
+
+#define	XPC_C_DISCONNECTED	0x00002000 /* channel is disconnected */
+#define	XPC_C_DISCONNECTING	0x00004000 /* channel is being disconnected */
+
+
+
+/*
+ * Manages channels on a partition basis. There is one of these structures
+ * for each partition (a partition will never utilize the structure that
+ * represents itself).
+ */
+struct xpc_partition {
+
+	/* XPC HB infrastructure */
+
+	u64 remote_rp_pa;		/* phys addr of partition's rsvd pg */
+	u64 remote_vars_pa;		/* phys addr of partition's vars */
+	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
+	u64 last_heartbeat;		/* HB at last read */
+	u64 remote_amos_page_pa;	/* phys addr of partition's amos page */
+	int remote_act_nasid;		/* active part's act/deact nasid */
+	int remote_act_phys_cpuid;	/* active part's act/deact phys cpuid */
+	u32 act_IRQ_rcvd;		/* IRQs since activation */
+	spinlock_t act_lock;		/* protect updating of act_state */
+	u8 act_state;			/* from XPC HB viewpoint */
+	enum xpc_retval reason;		/* reason partition is deactivating */
+	int reason_line;		/* line# deactivation initiated from */
+	int reactivate_nasid;		/* nasid in partition to reactivate */
+
+
+	/* XPC infrastructure referencing and teardown control */
+
+	u8 setup_state;			/* infrastructure setup state */
+	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
+	atomic_t references;		/* #of references to infrastructure */
+
+
+	/*
+	 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
+	 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
+	 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
+	 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
+	 */
+
+
+	u8 nchannels;		   /* #of defined channels supported */
+	atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+	struct xpc_channel *channels;/* array of channel structures */
+
+	void *local_GPs_base;	  /* base address of kmalloc'd space */
+	struct xpc_gp *local_GPs; /* local Get/Put values */
+	void *remote_GPs_base;    /* base address of kmalloc'd space */
+	struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
+				  /* values */
+	u64 remote_GPs_pa;	  /* phys address of remote partition's local */
+				  /* Get/Put values */
+
+
+	/* fields used to pass args when opening or closing a channel */
+
+	void *local_openclose_args_base;  /* base address of kmalloc'd space */
+	struct xpc_openclose_args *local_openclose_args;  /* local's args */
+	void *remote_openclose_args_base; /* base address of kmalloc'd space */
+	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+					  /* args */
+	u64 remote_openclose_args_pa;	  /* phys addr of remote's args */
+
+
+	/* IPI sending, receiving and handling related fields */
+
+	int remote_IPI_nasid;	    /* nasid of where to send IPIs */
+	int remote_IPI_phys_cpuid;  /* phys CPU ID of where to send IPIs */
+	AMO_t *remote_IPI_amo_va;   /* address of remote IPI AMO_t structure */
+
+	AMO_t *local_IPI_amo_va;    /* address of IPI AMO_t structure */
+	u64 local_IPI_amo;	    /* IPI amo flags yet to be handled */
+	char IPI_owner[8];	    /* IPI owner's name */
+	struct timer_list dropped_IPI_timer; /* dropped IPI timer */
+
+	spinlock_t IPI_lock;	    /* IPI handler lock */
+
+
+	/* channel manager related fields */
+
+	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
+	wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+
+} ____cacheline_aligned;
+
+
+/* struct xpc_partition act_state values (for XPC HB) */
+
+#define	XPC_P_INACTIVE		0x00	/* partition is not active */
+#define XPC_P_ACTIVATION_REQ	0x01	/* created thread to activate */
+#define XPC_P_ACTIVATING	0x02	/* activation thread started */
+#define XPC_P_ACTIVE		0x03	/* xpc_partition_up() was called */
+#define XPC_P_DEACTIVATING	0x04	/* partition deactivation initiated */
+
+
+#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
+			xpc_deactivate_partition(__LINE__, (_p), (_reason))
+
+
+/* struct xpc_partition setup_state values */
+
+#define XPC_P_UNSET		0x00	/* infrastructure was never setup */
+#define XPC_P_SETUP		0x01	/* infrastructure is setup */
+#define XPC_P_WTEARDOWN		0x02	/* waiting to teardown infrastructure */
+#define XPC_P_TORNDOWN		0x03	/* infrastructure is torn down */
+
+
+/*
+ * struct xpc_partition IPI_timer #of seconds to wait before checking for
+ * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
+ * after the IPI was received.
+ */
+#define XPC_P_DROPPED_IPI_WAIT	(0.25 * HZ)
+
+
+#define XPC_PARTID(_p)	((partid_t) ((_p) - &xpc_partitions[0]))
+
+
+
+/* found in xp_main.c */
+extern struct xpc_registration xpc_registrations[];
+
+
+/* >>> found in xpc_main.c only */
+extern struct device *xpc_part;
+extern struct device *xpc_chan;
+extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
+extern void xpc_dropped_IPI_check(struct xpc_partition *);
+extern void xpc_activate_kthreads(struct xpc_channel *, int);
+extern void xpc_create_kthreads(struct xpc_channel *, int);
+extern void xpc_disconnect_wait(int);
+
+
+/* found in xpc_main.c and efi-xpc.c */
+extern void xpc_activate_partition(struct xpc_partition *);
+
+
+/* found in xpc_partition.c */
+extern int xpc_exiting;
+extern int xpc_hb_interval;
+extern int xpc_hb_check_interval;
+extern struct xpc_vars *xpc_vars;
+extern struct xpc_rsvd_page *xpc_rsvd_page;
+extern struct xpc_vars_part *xpc_vars_part;
+extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+extern char xpc_remote_copy_buffer[];
+extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
+extern void xpc_allow_IPI_ops(void);
+extern void xpc_restrict_IPI_ops(void);
+extern int xpc_identify_act_IRQ_sender(void);
+extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
+extern void xpc_mark_partition_inactive(struct xpc_partition *);
+extern void xpc_discovery(void);
+extern void xpc_check_remote_hb(void);
+extern void xpc_deactivate_partition(const int, struct xpc_partition *,
+						enum xpc_retval);
+extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
+
+
+/* found in xpc_channel.c */
+extern void xpc_initiate_connect(int);
+extern void xpc_initiate_disconnect(int);
+extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
+extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
+extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
+						xpc_notify_func, void *);
+extern void xpc_initiate_received(partid_t, int, void *);
+extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
+extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
+extern void xpc_process_channel_activity(struct xpc_partition *);
+extern void xpc_connected_callout(struct xpc_channel *);
+extern void xpc_deliver_msg(struct xpc_channel *);
+extern void xpc_disconnect_channel(const int, struct xpc_channel *,
+					enum xpc_retval, unsigned long *);
+extern void xpc_disconnected_callout(struct xpc_channel *);
+extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
+extern void xpc_teardown_infrastructure(struct xpc_partition *);
+
+
+
+static inline void
+xpc_wakeup_channel_mgr(struct xpc_partition *part)
+{
+	if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+		wake_up(&part->channel_mgr_wq);
+	}
+}
+
+
+
+/*
+ * These next two inlines are used to keep us from tearing down a channel's
+ * msg queues while a thread may be referencing them.
+ */
+static inline void
+xpc_msgqueue_ref(struct xpc_channel *ch)
+{
+	atomic_inc(&ch->references);
+}
+
+static inline void
+xpc_msgqueue_deref(struct xpc_channel *ch)
+{
+	s32 refs = atomic_dec_return(&ch->references);
+
+	DBUG_ON(refs < 0);
+	if (refs == 0) {
+		xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
+	}
+}
+
+
+
+#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
+		xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
+
+
+/*
+ * These two inlines are used to keep us from tearing down a partition's
+ * setup infrastructure while a thread may be referencing it.
+ */
+static inline void
+xpc_part_deref(struct xpc_partition *part)
+{
+	s32 refs = atomic_dec_return(&part->references);
+
+
+	DBUG_ON(refs < 0);
+	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+		wake_up(&part->teardown_wq);
+	}
+}
+
+static inline int
+xpc_part_ref(struct xpc_partition *part)
+{
+	int setup;
+
+
+	atomic_inc(&part->references);
+	setup = (part->setup_state == XPC_P_SETUP);
+	if (!setup) {
+		xpc_part_deref(part);
+	}
+	return setup;
+}
+
+
+
+/*
+ * The following macro is to be used for the setting of the reason and
+ * reason_line fields in both the struct xpc_channel and struct xpc_partition
+ * structures.
+ */
+#define XPC_SET_REASON(_p, _reason, _line) \
+	{ \
+		(_p)->reason = _reason; \
+		(_p)->reason_line = _line; \
+	}
+
+
+
+/*
+ * The following set of macros and inlines are used for the sending and
+ * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
+ * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
+ * the other that is associated with channel activity (SGI_XPC_NOTIFY).
+ */
+
+static inline u64
+xpc_IPI_receive(AMO_t *amo)
+{
+	return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
+}
+
+
+static inline enum xpc_retval
+xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
+{
+	int ret = 0;
+	unsigned long irq_flags;
+
+
+	local_irq_save(irq_flags);
+
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
+	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
+
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
+				xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+
+	return ((ret == 0) ? xpcSuccess : xpcPioReadError);
+}
+
+
+/*
+ * IPIs associated with SGI_XPC_ACTIVATE IRQ.
+ */
+
+/*
+ * Flag the appropriate AMO variable and send an IPI to the specified node.
+ */
+static inline void
+xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
+			int to_phys_cpuid)
+{
+	int w_index = XPC_NASID_W_INDEX(from_nasid);
+	int b_index = XPC_NASID_B_INDEX(from_nasid);
+	AMO_t *amos = (AMO_t *) __va(amos_page +
+					(XP_MAX_PARTITIONS * sizeof(AMO_t)));
+
+
+	(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
+				to_phys_cpuid, SGI_XPC_ACTIVATE);
+}
+
+static inline void
+xpc_IPI_send_activate(struct xpc_vars *vars)
+{
+	xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
+				vars->act_nasid, vars->act_phys_cpuid);
+}
+
+static inline void
+xpc_IPI_send_activated(struct xpc_partition *part)
+{
+	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
+			part->remote_act_nasid, part->remote_act_phys_cpuid);
+}
+
+static inline void
+xpc_IPI_send_reactivate(struct xpc_partition *part)
+{
+	xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
+				xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
+}
+
+
+/*
+ * IPIs associated with SGI_XPC_NOTIFY IRQ.
+ */
+
+/*
+ * Send an IPI to the remote partition that is associated with the
+ * specified channel.
+ */
+#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
+		xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
+
+static inline void
+xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
+			unsigned long *irq_flags)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	enum xpc_retval ret;
+
+
+	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
+		ret = xpc_IPI_send(part->remote_IPI_amo_va,
+					(u64) ipi_flag << (ch->number * 8),
+					part->remote_IPI_nasid,
+					part->remote_IPI_phys_cpuid,
+					SGI_XPC_NOTIFY);
+		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
+			ipi_flag_string, ch->partid, ch->number, ret);
+		if (unlikely(ret != xpcSuccess)) {
+			if (irq_flags != NULL) {
+				spin_unlock_irqrestore(&ch->lock, *irq_flags);
+			}
+			XPC_DEACTIVATE_PARTITION(part, ret);
+			if (irq_flags != NULL) {
+				spin_lock_irqsave(&ch->lock, *irq_flags);
+			}
+		}
+	}
+}
+
+
+/*
+ * Make it look like the remote partition, which is associated with the
+ * specified channel, sent us an IPI. This faked IPI will be handled
+ * by xpc_dropped_IPI_check().
+ */
+#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
+		xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
+
+static inline void
+xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
+				char *ipi_flag_string)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+
+
+	FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
+			FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
+	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
+		ipi_flag_string, ch->partid, ch->number);
+}
+
+
+/*
+ * The sending and receiving of IPIs includes the setting of an AMO variable
+ * to indicate the reason the IPI was sent. The 64-bit variable is divided
+ * up into eight bytes, ordered from right to left. Byte zero pertains to
+ * channel 0, byte one to channel 1, and so on. Each byte is described by
+ * the following IPI flags.
+ */
+
+#define	XPC_IPI_CLOSEREQUEST	0x01
+#define	XPC_IPI_CLOSEREPLY	0x02
+#define	XPC_IPI_OPENREQUEST	0x04
+#define	XPC_IPI_OPENREPLY	0x08
+#define	XPC_IPI_MSGREQUEST	0x10
+
+
+/* given an AMO variable and a channel#, get its associated IPI flags */
+#define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
+
+#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
+
+
+static inline void
+xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+	args->reason = ch->reason;
+
+	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+	args->msg_size = ch->msg_size;
+	args->local_nentries = ch->local_nentries;
+
+	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+	args->remote_nentries = ch->remote_nentries;
+	args->local_nentries = ch->local_nentries;
+	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
+
+	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_msgrequest(struct xpc_channel *ch)
+{
+	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
+}
+
+static inline void
+xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
+{
+	XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
+}
+
+
+/*
+ * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
+ * pages are located in the lowest granule. The lowest granule uses 4k pages
+ * for cached references and an alternate TLB handler to never provide a
+ * cacheable mapping for the entire region. This will prevent speculative
+ * reading of cached copies of our lines from being issued which will cause
+ * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
+ * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
+ * and an additional 16 AMO variables for partition activation (xpc_hb.c).
+ */
+static inline AMO_t *
+xpc_IPI_init(partid_t partid)
+{
+	AMO_t *part_amo = xpc_vars->amos_page + partid;
+
+
+	xpc_IPI_receive(part_amo);
+	return part_amo;
+}
+
+
+
+static inline enum xpc_retval
+xpc_map_bte_errors(bte_result_t error)
+{
+	switch (error) {
+	case BTE_SUCCESS:	return xpcSuccess;
+	case BTEFAIL_DIR:	return xpcBteDirectoryError;
+	case BTEFAIL_POISON:	return xpcBtePoisonError;
+	case BTEFAIL_WERR:	return xpcBteWriteError;
+	case BTEFAIL_ACCESS:	return xpcBteAccessError;
+	case BTEFAIL_PWERR:	return xpcBtePWriteError;
+	case BTEFAIL_PRERR:	return xpcBtePReadError;
+	case BTEFAIL_TOUT:	return xpcBteTimeOutError;
+	case BTEFAIL_XTERR:	return xpcBteXtalkError;
+	case BTEFAIL_NOTAVAIL:	return xpcBteNotAvailable;
+	default:		return xpcBteUnmappedError;
+	}
+}
+
+
+
+static inline void *
+xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
+{
+	/* see if kmalloc will give us cacheline aligned memory by default */
+	*base = kmalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kmalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
+/*
+ * Check to see if there is any channel activity to/from the specified
+ * partition.
+ */
+static inline void
+xpc_check_for_channel_activity(struct xpc_partition *part)
+{
+	u64 IPI_amo;
+	unsigned long irq_flags;
+
+
+	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
+	if (IPI_amo == 0) {
+		return;
+	}
+
+	spin_lock_irqsave(&part->IPI_lock, irq_flags);
+	part->local_IPI_amo |= IPI_amo;
+	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
+
+	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
+		XPC_PARTID(part), IPI_amo);
+
+	xpc_wakeup_channel_mgr(part);
+}
+
+
+#endif /* _IA64_SN_KERNEL_XPC_H */
+

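Two encodings in this header are easy to misread: XPC_PACK_ARGS() folds two 32-bit values into the single u64 that add_timer() and kernel_thread() can carry, and the IPI AMO variable dedicates byte N to channel N's flags. A small self-checking sketch of both (the function name is illustrative):

static void xpc_encoding_sketch(partid_t partid, int ch_number)
{
	/* pack two 32-bit args into the one u64 a timer callback gets */
	u64 args = XPC_PACK_ARGS(partid, ch_number);
	/* byte N of the IPI AMO variable carries channel N's flags */
	u64 amo = (u64) XPC_IPI_OPENREQUEST << (ch_number * 8);

	DBUG_ON(XPC_UNPACK_ARG1(args) != (u64) partid);
	DBUG_ON(XPC_UNPACK_ARG2(args) != (u64) ch_number);
	DBUG_ON(XPC_GET_IPI_FLAGS(amo, ch_number) != XPC_IPI_OPENREQUEST);
}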
+ 2297 - 0
arch/ia64/sn/kernel/xpc_channel.c

@@ -0,0 +1,2297 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition Communication (XPC) channel support.
+ *
+ *	This is the part of XPC that manages the channels and
+ *	sends/receives messages across them to/from other partitions.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/sn_sal.h>
+#include "xpc.h"
+
+
+/*
+ * Set up the initial values for the XPartition Communication channels.
+ */
+static void
+xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
+{
+	int ch_number;
+	struct xpc_channel *ch;
+
+
+	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+		ch = &part->channels[ch_number];
+
+		ch->partid = partid;
+		ch->number = ch_number;
+		ch->flags = XPC_C_DISCONNECTED;
+
+		ch->local_GP = &part->local_GPs[ch_number];
+		ch->local_openclose_args =
+					&part->local_openclose_args[ch_number];
+
+		atomic_set(&ch->kthreads_assigned, 0);
+		atomic_set(&ch->kthreads_idle, 0);
+		atomic_set(&ch->kthreads_active, 0);
+
+		atomic_set(&ch->references, 0);
+		atomic_set(&ch->n_to_notify, 0);
+
+		spin_lock_init(&ch->lock);
+		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */
+
+		atomic_set(&ch->n_on_msg_allocate_wq, 0);
+		init_waitqueue_head(&ch->msg_allocate_wq);
+		init_waitqueue_head(&ch->idle_wq);
+	}
+}
+
+
+/*
+ * Setup the infrastructure necessary to support XPartition Communication
+ * between the specified remote partition and the local one.
+ */
+enum xpc_retval
+xpc_setup_infrastructure(struct xpc_partition *part)
+{
+	int ret;
+	struct timer_list *timer;
+	partid_t partid = XPC_PARTID(part);
+
+
+	/*
+	 * Zero out MOST of the entry for this partition. Only the fields
+	 * starting with `nchannels' will be zeroed. The preceding fields must
+	 * remain `viable' across partition ups and downs, since they may be
+	 * referenced during this memset() operation.
+	 */
+	memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
+				offsetof(struct xpc_partition, nchannels));
+
+	/*
+	 * Allocate all of the channel structures as a contiguous chunk of
+	 * memory.
+	 */
+	part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
+								GFP_KERNEL);
+	if (part->channels == NULL) {
+		dev_err(xpc_chan, "can't get memory for channels\n");
+		return xpcNoMemory;
+	}
+	memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
+
+	part->nchannels = XPC_NCHANNELS;
+
+
+	/* allocate all the required GET/PUT values */
+
+	part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+					GFP_KERNEL, &part->local_GPs_base);
+	if (part->local_GPs == NULL) {
+		kfree(part->channels);
+		part->channels = NULL;
+		dev_err(xpc_chan, "can't get memory for local get/put "
+			"values\n");
+		return xpcNoMemory;
+	}
+	memset(part->local_GPs, 0, XPC_GP_SIZE);
+
+	part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+					GFP_KERNEL, &part->remote_GPs_base);
+	if (part->remote_GPs == NULL) {
+		kfree(part->channels);
+		part->channels = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for remote get/put "
+			"values\n");
+		return xpcNoMemory;
+	}
+	memset(part->remote_GPs, 0, XPC_GP_SIZE);
+
+
+	/* allocate all the required open and close args */
+
+	part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
+					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					&part->local_openclose_args_base);
+	if (part->local_openclose_args == NULL) {
+		kfree(part->channels);
+		part->channels = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for local connect args\n");
+		return xpcNoMemory;
+	}
+	memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
+
+	part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
+					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					&part->remote_openclose_args_base);
+	if (part->remote_openclose_args == NULL) {
+		kfree(part->channels);
+		part->channels = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_openclose_args_base);
+		part->local_openclose_args = NULL;
+		dev_err(xpc_chan, "can't get memory for remote connect args\n");
+		return xpcNoMemory;
+	}
+	memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
+
+
+	xpc_initialize_channels(part, partid);
+
+	atomic_set(&part->nchannels_active, 0);
+
+
+	/* local_IPI_amo were set to 0 by an earlier memset() */
+
+	/* Initialize this partitions AMO_t structure */
+	part->local_IPI_amo_va = xpc_IPI_init(partid);
+
+	spin_lock_init(&part->IPI_lock);
+
+	atomic_set(&part->channel_mgr_requests, 1);
+	init_waitqueue_head(&part->channel_mgr_wq);
+
+	sprintf(part->IPI_owner, "xpc%02d", partid);
+	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
+				part->IPI_owner, (void *) (u64) partid);
+	if (ret != 0) {
+		kfree(part->channels);
+		part->channels = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_openclose_args_base);
+		part->local_openclose_args = NULL;
+		kfree(part->remote_openclose_args_base);
+		part->remote_openclose_args = NULL;
+		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
+			"errno=%d\n", -ret);
+		return xpcLackOfResources;
+	}
+
+	/* Setup a timer to check for dropped IPIs */
+	timer = &part->dropped_IPI_timer;
+	init_timer(timer);
+	timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
+	timer->data = (unsigned long) part;
+	timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
+	add_timer(timer);
+
+	/*
+	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
+	 * declaring that this partition is ready to go.
+	 */
+	(volatile u8) part->setup_state = XPC_P_SETUP;
+
+
+	/*
+	 * Setup the per partition specific variables required by the
+	 * remote partition to establish channel connections with us.
+	 *
+	 * The setting of the magic # indicates that these per partition
+	 * specific variables are ready to be used.
+	 */
+	xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
+	xpc_vars_part[partid].openclose_args_pa =
+					__pa(part->local_openclose_args);
+	xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
+	xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id());
+	xpc_vars_part[partid].IPI_phys_cpuid =
+					cpu_physical_id(smp_processor_id());
+	xpc_vars_part[partid].nchannels = part->nchannels;
+	(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Create a wrapper that hides the underlying mechanism for pulling a cacheline
+ * (or multiple cachelines) from a remote partition.
+ *
+ * src must be a cacheline aligned physical address on the remote partition.
+ * dst must be a cacheline aligned virtual address on this partition.
+ * cnt must be a multiple of the cacheline size.
+ */
+static enum xpc_retval
+xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
+				const void *src, size_t cnt)
+{
+	bte_result_t bte_ret;
+
+
+	DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
+	DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
+
+	if (part->act_state == XPC_P_DEACTIVATING) {
+		return part->reason;
+	}
+
+	bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
+				(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
+	if (bte_ret == BTE_SUCCESS) {
+		return xpcSuccess;
+	}
+
+	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
+		XPC_PARTID(part), bte_ret);
+
+	return xpc_map_bte_errors(bte_ret);
+}
+
+
+/*
+ * Pull the remote per partition specific variables from the specified
+ * partition.
+ */
+enum xpc_retval
+xpc_pull_remote_vars_part(struct xpc_partition *part)
+{
+	u8 buffer[L1_CACHE_BYTES * 2];
+	struct xpc_vars_part *pulled_entry_cacheline =
+			(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+	struct xpc_vars_part *pulled_entry;
+	u64 remote_entry_cacheline_pa, remote_entry_pa;
+	partid_t partid = XPC_PARTID(part);
+	enum xpc_retval ret;
+
+
+	/* pull the cacheline that contains the variables we're interested in */
+
+	DBUG_ON(part->remote_vars_part_pa !=
+				L1_CACHE_ALIGN(part->remote_vars_part_pa));
+	DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
+
+	remote_entry_pa = part->remote_vars_part_pa +
+			sn_partition_id * sizeof(struct xpc_vars_part);
+
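+	/* round the entry's address down to the start of its enclosing cacheline */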
+	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
+
+	pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
+				(remote_entry_pa & (L1_CACHE_BYTES - 1)));
+
+	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
+					(void *) remote_entry_cacheline_pa,
+					L1_CACHE_BYTES);
+	if (ret != xpcSuccess) {
+		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
+			"partition %d, ret=%d\n", partid, ret);
+		return ret;
+	}
+
+
+	/* see if they've been set up yet */
+
+	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
+				pulled_entry->magic != XPC_VP_MAGIC2) {
+
+		if (pulled_entry->magic != 0) {
+			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
+				"partition %d has bad magic value (=0x%lx)\n",
+				partid, sn_partition_id, pulled_entry->magic);
+			return xpcBadMagic;
+		}
+
+		/* they've not been initialized yet */
+		return xpcRetry;
+	}
+
+	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
+
+		/* validate the variables */
+
+		if (pulled_entry->GPs_pa == 0 ||
+				pulled_entry->openclose_args_pa == 0 ||
+					pulled_entry->IPI_amo_pa == 0) {
+
+			dev_err(xpc_chan, "partition %d's XPC vars_part for "
+				"partition %d are not valid\n", partid,
+				sn_partition_id);
+			return xpcInvalidAddress;
+		}
+
+		/* the variables we imported look to be valid */
+
+		part->remote_GPs_pa = pulled_entry->GPs_pa;
+		part->remote_openclose_args_pa =
+					pulled_entry->openclose_args_pa;
+		part->remote_IPI_amo_va =
+				      (AMO_t *) __va(pulled_entry->IPI_amo_pa);
+		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
+		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
+
+		if (part->nchannels > pulled_entry->nchannels) {
+			part->nchannels = pulled_entry->nchannels;
+		}
+
+		/* let the other side know that we've pulled their variables */
+
+		(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+	}
+
+	if (pulled_entry->magic == XPC_VP_MAGIC1) {
+		return xpcRetry;
+	}
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
+ */
+static u64
+xpc_get_IPI_flags(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	u64 IPI_amo;
+	enum xpc_retval ret;
+
+
+	/*
+	 * See if there are any IPI flags to be handled.
+	 */
+
+	spin_lock_irqsave(&part->IPI_lock, irq_flags);
+	if ((IPI_amo = part->local_IPI_amo) != 0) {
+		part->local_IPI_amo = 0;
+	}
+	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
+
+
+	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
+		ret = xpc_pull_remote_cachelines(part,
+					part->remote_openclose_args,
+					(void *) part->remote_openclose_args_pa,
+					XPC_OPENCLOSE_ARGS_SIZE);
+		if (ret != xpcSuccess) {
+			XPC_DEACTIVATE_PARTITION(part, ret);
+
+			dev_dbg(xpc_chan, "failed to pull openclose args from "
+				"partition %d, ret=%d\n", XPC_PARTID(part),
+				ret);
+
+			/* don't bother processing IPIs anymore */
+			IPI_amo = 0;
+		}
+	}
+
+	if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
+		ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
+						(void *) part->remote_GPs_pa,
+						XPC_GP_SIZE);
+		if (ret != xpcSuccess) {
+			XPC_DEACTIVATE_PARTITION(part, ret);
+
+			dev_dbg(xpc_chan, "failed to pull GPs from partition "
+				"%d, ret=%d\n", XPC_PARTID(part), ret);
+
+			/* don't bother processing IPIs anymore */
+			IPI_amo = 0;
+		}
+	}
+
+	return IPI_amo;
+}
+
+
+/*
+ * Allocate the local message queue and the notify queue.
+ */
+static enum xpc_retval
+xpc_allocate_local_msgqueue(struct xpc_channel *ch)
+{
+	unsigned long irq_flags;
+	int nentries;
+	size_t nbytes;
+
+
+	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
+	// >>> iterations of the for-loop, bail if set?
+
+	// >>> should we impose a minimum # of entries? like 4 or 8?
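+	/* try progressively smaller message queues until one can be allocated */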
+	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+
+		nbytes = nentries * ch->msg_size;
+		ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+						(GFP_KERNEL | GFP_DMA),
+						&ch->local_msgqueue_base);
+		if (ch->local_msgqueue == NULL) {
+			continue;
+		}
+		memset(ch->local_msgqueue, 0, nbytes);
+
+		nbytes = nentries * sizeof(struct xpc_notify);
+		ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
+		if (ch->notify_queue == NULL) {
+			kfree(ch->local_msgqueue_base);
+			ch->local_msgqueue = NULL;
+			continue;
+		}
+		memset(ch->notify_queue, 0, nbytes);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->local_nentries) {
+			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
+				"partid=%d, channel=%d\n", nentries,
+				ch->local_nentries, ch->partid, ch->number);
+
+			ch->local_nentries = nentries;
+		}
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpcSuccess;
+	}
+
+	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
+		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
+	return xpcNoMemory;
+}
+
+
+/*
+ * Allocate the cached remote message queue.
+ */
+static enum xpc_retval
+xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
+{
+	unsigned long irq_flags;
+	int nentries;
+	size_t nbytes;
+
+
+	DBUG_ON(ch->remote_nentries <= 0);
+
+	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
+	// >>> iterations of the for-loop, bail if set?
+
+	// >>> should we impose a minimum # of entries? like 4 or 8?
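+	/* as above, try progressively smaller queues until one can be allocated */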
+	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+
+		nbytes = nentries * ch->msg_size;
+		ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+						(GFP_KERNEL | GFP_DMA),
+						&ch->remote_msgqueue_base);
+		if (ch->remote_msgqueue == NULL) {
+			continue;
+		}
+		memset(ch->remote_msgqueue, 0, nbytes);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->remote_nentries) {
+			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
+				"partid=%d, channel=%d\n", nentries,
+				ch->remote_nentries, ch->partid, ch->number);
+
+			ch->remote_nentries = nentries;
+		}
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpcSuccess;
+	}
+
+	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
+		"partid=%d, channel=%d\n", ch->partid, ch->number);
+	return xpcNoMemory;
+}
+
+
+/*
+ * Allocate message queues and other stuff associated with a channel.
+ *
+ * Note: Assumes all of the channel sizes are filled in.
+ */
+static enum xpc_retval
+xpc_allocate_msgqueues(struct xpc_channel *ch)
+{
+	unsigned long irq_flags;
+	int i;
+	enum xpc_retval ret;
+
+
+	DBUG_ON(ch->flags & XPC_C_SETUP);
+
+	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+		return ret;
+	}
+
+	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+		kfree(ch->local_msgqueue_base);
+		ch->local_msgqueue = NULL;
+		kfree(ch->notify_queue);
+		ch->notify_queue = NULL;
+		return ret;
+	}
+
+	for (i = 0; i < ch->local_nentries; i++) {
+		/* use a semaphore as an event wait queue */
+		sema_init(&ch->notify_queue[i].sema, 0);
+	}
+
+	sema_init(&ch->teardown_sema, 0);	/* event wait */
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+	ch->flags |= XPC_C_SETUP;
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Process a connect message from a remote partition.
+ *
+ * Note: xpc_process_connect() is expecting to be called with the
+ * spin_lock_irqsave held and will leave it locked upon return.
+ */
+static void
+xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	enum xpc_retval ret;
+
+
+	DBUG_ON(!spin_is_locked(&ch->lock));
+
+	if (!(ch->flags & XPC_C_OPENREQUEST) ||
+				!(ch->flags & XPC_C_ROPENREQUEST)) {
+		/* nothing more to do for now */
+		return;
+	}
+	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
+
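+	/*
+	 * The msgqueues are allocated out from under the channel lock
+	 * since the allocation may block.
+	 */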
+	if (!(ch->flags & XPC_C_SETUP)) {
+		spin_unlock_irqrestore(&ch->lock, *irq_flags);
+		ret = xpc_allocate_msgqueues(ch);
+		spin_lock_irqsave(&ch->lock, *irq_flags);
+
+		if (ret != xpcSuccess) {
+			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
+		}
+		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+			return;
+		}
+
+		DBUG_ON(!(ch->flags & XPC_C_SETUP));
+		DBUG_ON(ch->local_msgqueue == NULL);
+		DBUG_ON(ch->remote_msgqueue == NULL);
+	}
+
+	if (!(ch->flags & XPC_C_OPENREPLY)) {
+		ch->flags |= XPC_C_OPENREPLY;
+		xpc_IPI_send_openreply(ch, irq_flags);
+	}
+
+	if (!(ch->flags & XPC_C_ROPENREPLY)) {
+		return;
+	}
+
+	DBUG_ON(ch->remote_msgqueue_pa == 0);
+
+	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
+
+	dev_info(xpc_chan, "channel %d to partition %d connected\n",
+		ch->number, ch->partid);
+
+	spin_unlock_irqrestore(&ch->lock, *irq_flags);
+	xpc_create_kthreads(ch, 1);
+	spin_lock_irqsave(&ch->lock, *irq_flags);
+}
+
+
+/*
+ * Free up message queues and other stuff that were allocated for the specified
+ * channel.
+ *
+ * Note: ch->reason and ch->reason_line are left set for debugging purposes,
+ * they're cleared when XPC_C_DISCONNECTED is cleared.
+ */
+static void
+xpc_free_msgqueues(struct xpc_channel *ch)
+{
+	DBUG_ON(!spin_is_locked(&ch->lock));
+	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
+
+	ch->remote_msgqueue_pa = 0;
+	ch->func = NULL;
+	ch->key = NULL;
+	ch->msg_size = 0;
+	ch->local_nentries = 0;
+	ch->remote_nentries = 0;
+	ch->kthreads_assigned_limit = 0;
+	ch->kthreads_idle_limit = 0;
+
+	ch->local_GP->get = 0;
+	ch->local_GP->put = 0;
+	ch->remote_GP.get = 0;
+	ch->remote_GP.put = 0;
+	ch->w_local_GP.get = 0;
+	ch->w_local_GP.put = 0;
+	ch->w_remote_GP.get = 0;
+	ch->w_remote_GP.put = 0;
+	ch->next_msg_to_pull = 0;
+
+	if (ch->flags & XPC_C_SETUP) {
+		ch->flags &= ~XPC_C_SETUP;
+
+		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
+			ch->flags, ch->partid, ch->number);
+
+		kfree(ch->local_msgqueue_base);
+		ch->local_msgqueue = NULL;
+		kfree(ch->remote_msgqueue_base);
+		ch->remote_msgqueue = NULL;
+		kfree(ch->notify_queue);
+		ch->notify_queue = NULL;
+
+		/* in case someone is waiting for the teardown to complete */
+		up(&ch->teardown_sema);
+	}
+}
+
+
+/*
+ * spin_lock_irqsave() is expected to be held on entry.
+ */
+static void
+xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	u32 ch_flags = ch->flags;
+
+
+	DBUG_ON(!spin_is_locked(&ch->lock));
+
+	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+		return;
+	}
+
+	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+
+	/* make sure all activity has settled down first */
+
+	if (atomic_read(&ch->references) > 0) {
+		return;
+	}
+	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+
+	/* it's now safe to free the channel's message queues */
+
+	xpc_free_msgqueues(ch);
+	DBUG_ON(ch->flags & XPC_C_SETUP);
+
+	if (part->act_state != XPC_P_DEACTIVATING) {
+
+		/* as long as the other side is up, do the full protocol */
+
+		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+			return;
+		}
+
+		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
+			ch->flags |= XPC_C_CLOSEREPLY;
+			xpc_IPI_send_closereply(ch, irq_flags);
+		}
+
+		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+			return;
+		}
+	}
+
+	/* both sides are disconnected now */
+
+	ch->flags = XPC_C_DISCONNECTED;	/* clear all flags, but this one */
+
+	atomic_dec(&part->nchannels_active);
+
+	if (ch_flags & XPC_C_WASCONNECTED) {
+		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
+			"reason=%d\n", ch->number, ch->partid, ch->reason);
+	}
+}
+
+
+/*
+ * Process a change in the channel's remote connection state.
+ */
+static void
+xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
+				u8 IPI_flags)
+{
+	unsigned long irq_flags;
+	struct xpc_openclose_args *args =
+				&part->remote_openclose_args[ch_number];
+	struct xpc_channel *ch = &part->channels[ch_number];
+	enum xpc_retval reason;
+
+
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+
+
+	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
+
+		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
+			"from partid=%d, channel=%d\n", args->reason,
+			ch->partid, ch->number);
+
+		/*
+		 * If RCLOSEREQUEST is set, we're probably waiting for
+		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
+		 * with this RCLOSEREQUEST in the IPI_flags.
+		 */
+
+		if (ch->flags & XPC_C_RCLOSEREQUEST) {
+			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
+			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
+			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
+
+			DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
+			IPI_flags &= ~XPC_IPI_CLOSEREPLY;
+			ch->flags |= XPC_C_RCLOSEREPLY;
+
+			/* both sides have finished disconnecting */
+			xpc_process_disconnect(ch, &irq_flags);
+		}
+
+		if (ch->flags & XPC_C_DISCONNECTED) {
+			/*
+			 * If no OPENREQUEST is packed in with this
+			 * CLOSEREQUEST, the only way we can legitimately be
+			 * disconnected here is if the partition is
+			 * deactivating; there is nothing left to close. If an
+			 * OPENREQUEST is packed in, the other side has since
+			 * opened a new connection (which it is now closing),
+			 * so restore the connecting state and process the
+			 * close against it.
+			 */
+
+			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
+				DBUG_ON(part->act_state !=
+							XPC_P_DEACTIVATING);
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+				return;
+			}
+
+			XPC_SET_REASON(ch, 0, 0);
+			ch->flags &= ~XPC_C_DISCONNECTED;
+
+			atomic_inc(&part->nchannels_active);
+			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
+		}
+
+		IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);
+
+		/*
+		 * The meaningful CLOSEREQUEST connection state fields are:
+		 *      reason = reason connection is to be closed
+		 */
+
+		ch->flags |= XPC_C_RCLOSEREQUEST;
+
+		if (!(ch->flags & XPC_C_DISCONNECTING)) {
+			reason = args->reason;
+			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
+				reason = xpcUnknownReason;
+			} else if (reason == xpcUnregistering) {
+				reason = xpcOtherUnregistering;
+			}
+
+			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
+		} else {
+			xpc_process_disconnect(ch, &irq_flags);
+		}
+	}
+
+
+	if (IPI_flags & XPC_IPI_CLOSEREPLY) {
+
+		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
+			" channel=%d\n", ch->partid, ch->number);
+
+		if (ch->flags & XPC_C_DISCONNECTED) {
+			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
+
+		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+		DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));
+
+		ch->flags |= XPC_C_RCLOSEREPLY;
+
+		if (ch->flags & XPC_C_CLOSEREPLY) {
+			/* both sides have finished disconnecting */
+			xpc_process_disconnect(ch, &irq_flags);
+		}
+	}
+
+
+	if (IPI_flags & XPC_IPI_OPENREQUEST) {
+
+		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
+			"local_nentries=%d) received from partid=%d, "
+			"channel=%d\n", args->msg_size, args->local_nentries,
+			ch->partid, ch->number);
+
+		if ((ch->flags & XPC_C_DISCONNECTING) ||
+					part->act_state == XPC_P_DEACTIVATING) {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
+		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
+							XPC_C_OPENREQUEST)));
+		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+					XPC_C_OPENREPLY | XPC_C_CONNECTED));
+
+		/*
+		 * The meaningful OPENREQUEST connection state fields are:
+		 *      msg_size = size of channel's messages in bytes
+		 *      local_nentries = remote partition's local_nentries
+		 */
+		DBUG_ON(args->msg_size == 0);
+		DBUG_ON(args->local_nentries == 0);
+
+		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
+		ch->remote_nentries = args->local_nentries;
+
+
+		if (ch->flags & XPC_C_OPENREQUEST) {
+			if (args->msg_size != ch->msg_size) {
+				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+								&irq_flags);
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+				return;
+			}
+		} else {
+			ch->msg_size = args->msg_size;
+
+			XPC_SET_REASON(ch, 0, 0);
+			ch->flags &= ~XPC_C_DISCONNECTED;
+
+			atomic_inc(&part->nchannels_active);
+		}
+
+		xpc_process_connect(ch, &irq_flags);
+	}
+
+
+	if (IPI_flags & XPC_IPI_OPENREPLY) {
+
+		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
+			"local_nentries=%d, remote_nentries=%d) received from "
+			"partid=%d, channel=%d\n", args->local_msgqueue_pa,
+			args->local_nentries, args->remote_nentries,
+			ch->partid, ch->number);
+
+		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return;
+		}
+		DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
+		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
+		DBUG_ON(ch->flags & XPC_C_CONNECTED);
+
+		/*
+		 * The meaningful OPENREPLY connection state fields are:
+		 *      local_msgqueue_pa = physical address of remote
+		 *			    partition's local_msgqueue
+		 *      local_nentries = remote partition's local_nentries
+		 *      remote_nentries = remote partition's remote_nentries
+		 */
+		DBUG_ON(args->local_msgqueue_pa == 0);
+		DBUG_ON(args->local_nentries == 0);
+		DBUG_ON(args->remote_nentries == 0);
+
+		ch->flags |= XPC_C_ROPENREPLY;
+		ch->remote_msgqueue_pa = args->local_msgqueue_pa;
+
+		if (args->local_nentries < ch->remote_nentries) {
+			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
+				"remote_nentries=%d, old remote_nentries=%d, "
+				"partid=%d, channel=%d\n",
+				args->local_nentries, ch->remote_nentries,
+				ch->partid, ch->number);
+
+			ch->remote_nentries = args->local_nentries;
+		}
+		if (args->remote_nentries < ch->local_nentries) {
+			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
+				"local_nentries=%d, old local_nentries=%d, "
+				"partid=%d, channel=%d\n",
+				args->remote_nentries, ch->local_nentries,
+				ch->partid, ch->number);
+
+			ch->local_nentries = args->remote_nentries;
+		}
+
+		xpc_process_connect(ch, &irq_flags);
+	}
+
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+}
+
+
+/*
+ * Attempt to establish a channel connection to a remote partition.
+ */
+static enum xpc_retval
+xpc_connect_channel(struct xpc_channel *ch)
+{
+	unsigned long irq_flags;
+	struct xpc_registration *registration = &xpc_registrations[ch->number];
+
+
+	if (down_interruptible(&registration->sema) != 0) {
+		return xpcInterrupted;
+	}
+
+	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
+		up(&registration->sema);
+		return xpcUnregistered;
+	}
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	DBUG_ON(ch->flags & XPC_C_CONNECTED);
+	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		up(&registration->sema);
+		return ch->reason;
+	}
+
+
+	/* add info from the channel connect registration to the channel */
+
+	ch->kthreads_assigned_limit = registration->assigned_limit;
+	ch->kthreads_idle_limit = registration->idle_limit;
+	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
+	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
+
+	ch->func = registration->func;
+	DBUG_ON(registration->func == NULL);
+	ch->key = registration->key;
+
+	ch->local_nentries = registration->nentries;
+
+	if (ch->flags & XPC_C_ROPENREQUEST) {
+		if (registration->msg_size != ch->msg_size) {
+			/* the local and remote sides aren't the same */
+
+			/*
+			 * Because XPC_DISCONNECT_CHANNEL() can block we're
+			 * forced to up the registration sema before we unlock
+			 * the channel lock. But that's okay here because we're
+			 * done with the part that required the registration
+			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
+			 * channel lock be locked and will unlock and relock
+			 * the channel lock as needed.
+			 */
+			up(&registration->sema);
+			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+								&irq_flags);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return xpcUnequalMsgSizes;
+		}
+	} else {
+		ch->msg_size = registration->msg_size;
+
+		XPC_SET_REASON(ch, 0, 0);
+		ch->flags &= ~XPC_C_DISCONNECTED;
+
+		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
+	}
+
+	up(&registration->sema);
+
+
+	/* initiate the connection */
+
+	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
+	xpc_IPI_send_openrequest(ch, &irq_flags);
+
+	xpc_process_connect(ch, &irq_flags);
+
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Notify those who wanted to be notified upon delivery of their message.
+ */
+static void
+xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
+{
+	struct xpc_notify *notify;
+	u8 notify_type;
+	s64 get = ch->w_remote_GP.get - 1;
+
+
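+	/* scan notify entries for messages between the old GET value and put */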
+	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
+
+		notify = &ch->notify_queue[get % ch->local_nentries];
+
+		/*
+		 * See if the notify entry indicates it was associated with
+		 * a message whose sender wants to be notified. It is possible
+		 * that it is, but someone else is doing or has done the
+		 * notification.
+		 */
+		notify_type = notify->type;
+		if (notify_type == 0 ||
+				cmpxchg(&notify->type, notify_type, 0) !=
+								notify_type) {
+			continue;
+		}
+
+		DBUG_ON(notify_type != XPC_N_CALL);
+
+		atomic_dec(&ch->n_to_notify);
+
+		if (notify->func != NULL) {
+			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
+				"msg_number=%ld, partid=%d, channel=%d\n",
+				(void *) notify, get, ch->partid, ch->number);
+
+			notify->func(reason, ch->partid, ch->number,
+								notify->key);
+
+			dev_dbg(xpc_chan, "notify->func() returned, "
+				"notify=0x%p, msg_number=%ld, partid=%d, "
+				"channel=%d\n", (void *) notify, get,
+				ch->partid, ch->number);
+		}
+	}
+}
+
+
+/*
+ * Clear some of the msg flags in the local message queue.
+ */
+static inline void
+xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
+{
+	struct xpc_msg *msg;
+	s64 get;
+
+
+	get = ch->w_remote_GP.get;
+	do {
+		msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
+				(get % ch->local_nentries) * ch->msg_size);
+		msg->flags = 0;
+	} while (++get < (volatile s64) ch->remote_GP.get);
+}
+
+
+/*
+ * Clear some of the msg flags in the remote message queue.
+ */
+static inline void
+xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
+{
+	struct xpc_msg *msg;
+	s64 put;
+
+
+	put = ch->w_remote_GP.put;
+	do {
+		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
+				(put % ch->remote_nentries) * ch->msg_size);
+		msg->flags = 0;
+	} while (++put < (volatile s64) ch->remote_GP.put);
+}
+
+
+static void
+xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
+{
+	struct xpc_channel *ch = &part->channels[ch_number];
+	int nmsgs_sent;
+
+
+	ch->remote_GP = part->remote_GPs[ch_number];
+
+
+	/* See what, if anything, has changed for each connected channel */
+
+	xpc_msgqueue_ref(ch);
+
+	if (ch->w_remote_GP.get == ch->remote_GP.get &&
+				ch->w_remote_GP.put == ch->remote_GP.put) {
+		/* nothing changed since GPs were last pulled */
+		xpc_msgqueue_deref(ch);
+		return;
+	}
+
+	if (!(ch->flags & XPC_C_CONNECTED)) {
+		xpc_msgqueue_deref(ch);
+		return;
+	}
+
+
+	/*
+	 * First check to see if messages recently sent by us have been
+	 * received by the other side. (The remote GET value will have
+	 * changed since we last looked at it.)
+	 */
+
+	if (ch->w_remote_GP.get != ch->remote_GP.get) {
+
+		/*
+		 * We need to notify any senders that want to be notified
+		 * that their sent messages have been received by their
+		 * intended recipients. We need to do this before updating
+		 * w_remote_GP.get so that we don't allocate the same message
+		 * queue entries prematurely (see xpc_allocate_msg()).
+		 */
+		if (atomic_read(&ch->n_to_notify) > 0) {
+			/*
+			 * Notify senders that messages sent have been
+			 * received and delivered by the other side.
+			 */
+			xpc_notify_senders(ch, xpcMsgDelivered,
+							ch->remote_GP.get);
+		}
+
+		/*
+		 * Clear msg->flags in previously sent messages, so that
+		 * they're ready for xpc_allocate_msg().
+		 */
+		xpc_clear_local_msgqueue_flags(ch);
+
+		(volatile s64) ch->w_remote_GP.get = ch->remote_GP.get;
+
+		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
+			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
+			ch->number);
+
+		/*
+		 * If anyone was waiting for message queue entries to become
+		 * available, wake them up.
+		 */
+		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+			wake_up(&ch->msg_allocate_wq);
+		}
+	}
+
+
+	/*
+	 * Now check for newly sent messages by the other side. (The remote
+	 * PUT value will have changed since we last looked at it.)
+	 */
+
+	if (ch->w_remote_GP.put != ch->remote_GP.put) {
+		/*
+		 * Clear msg->flags in previously received messages, so that
+		 * they're ready for xpc_get_deliverable_msg().
+		 */
+		xpc_clear_remote_msgqueue_flags(ch);
+
+		(volatile s64) ch->w_remote_GP.put = ch->remote_GP.put;
+
+		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
+			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
+			ch->number);
+
+		nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
+		if (nmsgs_sent > 0) {
+			dev_dbg(xpc_chan, "msgs waiting to be copied and "
+				"delivered=%d, partid=%d, channel=%d\n",
+				nmsgs_sent, ch->partid, ch->number);
+
+			if (ch->flags & XPC_C_CONNECTCALLOUT) {
+				xpc_activate_kthreads(ch, nmsgs_sent);
+			}
+		}
+	}
+
+	xpc_msgqueue_deref(ch);
+}
+
+
+void
+xpc_process_channel_activity(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	u64 IPI_amo, IPI_flags;
+	struct xpc_channel *ch;
+	int ch_number;
+
+
+	IPI_amo = xpc_get_IPI_flags(part);
+
+	/*
+	 * Initiate channel connections for registered channels.
+	 *
+	 * For each connected channel that has pending messages activate idle
+	 * kthreads and/or create new kthreads as needed.
+	 */
+
+	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+		ch = &part->channels[ch_number];
+
+
+		/*
+		 * Process any open or close related IPI flags, and then deal
+		 * with connecting or disconnecting the channel as required.
+		 */
+
+		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
+
+		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
+		}
+
+
+		if (ch->flags & XPC_C_DISCONNECTING) {
+			spin_lock_irqsave(&ch->lock, irq_flags);
+			xpc_process_disconnect(ch, &irq_flags);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			continue;
+		}
+
+		if (part->act_state == XPC_P_DEACTIVATING) {
+			continue;
+		}
+
+		if (!(ch->flags & XPC_C_CONNECTED)) {
+			if (!(ch->flags & XPC_C_OPENREQUEST)) {
+				DBUG_ON(ch->flags & XPC_C_SETUP);
+				(void) xpc_connect_channel(ch);
+			} else {
+				spin_lock_irqsave(&ch->lock, irq_flags);
+				xpc_process_connect(ch, &irq_flags);
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+			}
+			continue;
+		}
+
+
+		/*
+		 * Process any message related IPI flags, this may involve the
+		 * activation of kthreads to deliver any pending messages sent
+		 * from the other partition.
+		 */
+
+		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+			xpc_process_msg_IPI(part, ch_number);
+		}
+	}
+}
+
+
+/*
+ * XPC's heartbeat code calls this function to inform XPC that a partition has
+ * gone down.  XPC responds by tearing down the XPartition Communication
+ * infrastructure used for the just downed partition.
+ *
+ * XPC's heartbeat code will never call this function and xpc_partition_up()
+ * at the same time. Nor will it ever make multiple calls to either function
+ * at the same time.
+ */
+void
+xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
+{
+	unsigned long irq_flags;
+	int ch_number;
+	struct xpc_channel *ch;
+
+
+	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
+		XPC_PARTID(part), reason);
+
+	if (!xpc_part_ref(part)) {
+		/* infrastructure for this partition isn't currently set up */
+		return;
+	}
+
+
+	/* disconnect all channels associated with the downed partition */
+
+	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+		ch = &part->channels[ch_number];
+
+
+		xpc_msgqueue_ref(ch);
+		spin_lock_irqsave(&ch->lock, irq_flags);
+
+		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
+
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		xpc_msgqueue_deref(ch);
+	}
+
+	xpc_wakeup_channel_mgr(part);
+
+	xpc_part_deref(part);
+}
+
+
+/*
+ * Tear down the infrastructure necessary to support XPartition Communication
+ * between the specified remote partition and the local one.
+ */
+void
+xpc_teardown_infrastructure(struct xpc_partition *part)
+{
+	partid_t partid = XPC_PARTID(part);
+
+
+	/*
+	 * We start off by making this partition inaccessible to local
+	 * processes by marking it as no longer setup. Then we make it
+	 * inaccessible to remote processes by clearing the XPC per partition
+	 * specific variable's magic # (which indicates that these variables
+	 * are no longer valid) and by ignoring all XPC notify IPIs sent to
+	 * this partition.
+	 */
+
+	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
+	DBUG_ON(part->setup_state != XPC_P_SETUP);
+	part->setup_state = XPC_P_WTEARDOWN;
+
+	xpc_vars_part[partid].magic = 0;
+
+
+	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
+
+
+	/*
+	 * Before proceeding with the teardown we have to wait until all
+	 * existing references cease.
+	 */
+	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
+
+
+	/* now we can begin tearing down the infrastructure */
+
+	part->setup_state = XPC_P_TORNDOWN;
+
+	/* in case we've still got outstanding timers registered... */
+	del_timer_sync(&part->dropped_IPI_timer);
+
+	kfree(part->remote_openclose_args_base);
+	part->remote_openclose_args = NULL;
+	kfree(part->local_openclose_args_base);
+	part->local_openclose_args = NULL;
+	kfree(part->remote_GPs_base);
+	part->remote_GPs = NULL;
+	kfree(part->local_GPs_base);
+	part->local_GPs = NULL;
+	kfree(part->channels);
+	part->channels = NULL;
+	part->local_IPI_amo_va = NULL;
+}
+
+
+/*
+ * Called by XP at the time of channel connection registration to cause
+ * XPC to establish connections to all currently active partitions.
+ */
+void
+xpc_initiate_connect(int ch_number)
+{
+	partid_t partid;
+	struct xpc_partition *part;
+	struct xpc_channel *ch;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		part = &xpc_partitions[partid];
+
+		if (xpc_part_ref(part)) {
+			ch = &part->channels[ch_number];
+
+			if (!(ch->flags & XPC_C_DISCONNECTING)) {
+				DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
+				DBUG_ON(ch->flags & XPC_C_CONNECTED);
+				DBUG_ON(ch->flags & XPC_C_SETUP);
+
+				/*
+				 * Initiate the establishment of a connection
+				 * on the newly registered channel to the
+				 * remote partition.
+				 */
+				xpc_wakeup_channel_mgr(part);
+			}
+
+			xpc_part_deref(part);
+		}
+	}
+}
+
+
+void
+xpc_connected_callout(struct xpc_channel *ch)
+{
+	unsigned long irq_flags;
+
+
+	/* let the registerer know that a connection has been established */
+
+	if (ch->func != NULL) {
+		dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
+			"partid=%d, channel=%d\n", ch->partid, ch->number);
+
+		ch->func(xpcConnected, ch->partid, ch->number,
+				(void *) (u64) ch->local_nentries, ch->key);
+
+		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
+			"partid=%d, channel=%d\n", ch->partid, ch->number);
+	}
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+	ch->flags |= XPC_C_CONNECTCALLOUT;
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+}
+
+
+/*
+ * Called by XP at the time of channel connection unregistration to cause
+ * XPC to teardown all current connections for the specified channel.
+ *
+ * Before returning xpc_initiate_disconnect() will wait until all connections
+ * on the specified channel have been closed/torn down. So the caller can be
+ * assured that they will not be receiving any more callouts from XPC to the
+ * function they registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to unregister.
+ */
+void
+xpc_initiate_disconnect(int ch_number)
+{
+	unsigned long irq_flags;
+	partid_t partid;
+	struct xpc_partition *part;
+	struct xpc_channel *ch;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+
+	/* initiate the channel disconnect for every active partition */
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		part = &xpc_partitions[partid];
+
+		if (xpc_part_ref(part)) {
+			ch = &part->channels[ch_number];
+			xpc_msgqueue_ref(ch);
+
+			spin_lock_irqsave(&ch->lock, irq_flags);
+
+			XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
+								&irq_flags);
+
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+			xpc_msgqueue_deref(ch);
+			xpc_part_deref(part);
+		}
+	}
+
+	xpc_disconnect_wait(ch_number);
+}
+
+
+/*
+ * Disconnect a channel and reflect it back to all who may be waiting.
+ *
+ * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
+ * >>> xpc_free_msgqueues().
+ *
+ * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
+ */
+void
+xpc_disconnect_channel(const int line, struct xpc_channel *ch,
+			enum xpc_retval reason, unsigned long *irq_flags)
+{
+	u32 flags;
+
+
+	DBUG_ON(!spin_is_locked(&ch->lock));
+
+	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+		return;
+	}
+	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
+
+	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
+		reason, line, ch->partid, ch->number);
+
+	XPC_SET_REASON(ch, reason, line);
+
+	flags = ch->flags;
+	/* some of these may not have been set */
+	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
+			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+			XPC_C_CONNECTING | XPC_C_CONNECTED);
+
+	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
+	xpc_IPI_send_closerequest(ch, irq_flags);
+
+	if (flags & XPC_C_CONNECTED) {
+		ch->flags |= XPC_C_WASCONNECTED;
+	}
+
+	if (atomic_read(&ch->kthreads_idle) > 0) {
+		/* wake all idle kthreads so they can exit */
+		wake_up_all(&ch->idle_wq);
+	}
+
+	spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+
+	/* wake those waiting to allocate an entry from the local msg queue */
+
+	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+		wake_up(&ch->msg_allocate_wq);
+	}
+
+	/* wake those waiting for notify completion */
+
+	if (atomic_read(&ch->n_to_notify) > 0) {
+		xpc_notify_senders(ch, reason, ch->w_local_GP.put);
+	}
+
+	spin_lock_irqsave(&ch->lock, *irq_flags);
+}
+
+
+void
+xpc_disconnected_callout(struct xpc_channel *ch)
+{
+	/*
+	 * Let the channel's registerer know that the channel is now
+	 * disconnected. We don't want to do this if the registerer was never
+	 * informed of a connection being made, unless the disconnect was for
+	 * abnormal reasons.
+	 */
+
+	if (ch->func != NULL) {
+		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
+			"channel=%d\n", ch->reason, ch->partid, ch->number);
+
+		ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);
+
+		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
+			"channel=%d\n", ch->reason, ch->partid, ch->number);
+	}
+}
+
+
+/*
+ * Wait for a message entry to become available for the specified channel,
+ * but don't wait any longer than 1 jiffy.
+ */
+static enum xpc_retval
+xpc_allocate_msg_wait(struct xpc_channel *ch)
+{
+	enum xpc_retval ret;
+
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+		return ch->reason;
+	}
+
+	atomic_inc(&ch->n_on_msg_allocate_wq);
+	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
+	atomic_dec(&ch->n_on_msg_allocate_wq);
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		ret = ch->reason;
+		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+	} else if (ret == 0) {
+		ret = xpcTimeout;
+	} else {
+		ret = xpcInterrupted;
+	}
+
+	return ret;
+}
+
+
+/*
+ * Allocate an entry for a message from the message queue associated with the
+ * specified channel.
+ */
+static enum xpc_retval
+xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
+			struct xpc_msg **address_of_msg)
+{
+	struct xpc_msg *msg;
+	enum xpc_retval ret;
+	s64 put;
+
+
+	/* this reference will be dropped in xpc_send_msg() */
+	xpc_msgqueue_ref(ch);
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		xpc_msgqueue_deref(ch);
+		return ch->reason;
+	}
+	if (!(ch->flags & XPC_C_CONNECTED)) {
+		xpc_msgqueue_deref(ch);
+		return xpcNotConnected;
+	}
+
+
+	/*
+	 * Get the next available message entry from the local message queue.
+	 * If none are available, we'll make sure that we grab the latest
+	 * GP values.
+	 */
+	ret = xpcTimeout;
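+	/* xpcTimeout here makes the first full-queue pass below fake an IPI */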
+
+	while (1) {
+
+		put = (volatile s64) ch->w_local_GP.put;
+		if (put - (volatile s64) ch->w_remote_GP.get <
+							ch->local_nentries) {
+
+			/* There are available message entries. We need to try
+			 * to secure one for ourselves. We'll do this by trying
+			 * to increment w_local_GP.put as long as someone else
+			 * doesn't beat us to it. If they do, we'll have to
+			 * try again.
+			 */
+			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
+									put) {
+				/* we got the entry referenced by put */
+				break;
+			}
+			continue;	/* try again */
+		}
+
+
+		/*
+		 * There aren't any available msg entries at this time.
+		 *
+		 * In waiting for a message entry to become available,
+		 * we set a timeout in case the other side is not
+		 * sending completion IPIs. This lets us fake an IPI
+		 * that will cause the IPI handler to fetch the latest
+		 * GP values as if an IPI was sent by the other side.
+		 */
+		if (ret == xpcTimeout) {
+			xpc_IPI_send_local_msgrequest(ch);
+		}
+
+		if (flags & XPC_NOWAIT) {
+			xpc_msgqueue_deref(ch);
+			return xpcNoWait;
+		}
+
+		ret = xpc_allocate_msg_wait(ch);
+		if (ret != xpcInterrupted && ret != xpcTimeout) {
+			xpc_msgqueue_deref(ch);
+			return ret;
+		}
+	}
+
+
+	/* get the message's address and initialize it */
+	msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
+				(put % ch->local_nentries) * ch->msg_size);
+
+
+	DBUG_ON(msg->flags != 0);
+	msg->number = put;
+
+	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
+		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
+		(void *) msg, msg->number, ch->partid, ch->number);
+
+	*address_of_msg = msg;
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Allocate an entry for a message from the message queue associated with the
+ * specified channel. NOTE that this routine can sleep waiting for a message
+ * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
+ *
+ * Arguments:
+ *
+ *	partid - ID of partition to which the channel is connected.
+ *	ch_number - channel #.
+ *	flags - see xpc.h for valid flags.
+ *	payload - address of the allocated payload area pointer (filled in on
+ * 	          return) in which the user-defined message is constructed.
+ */
+enum xpc_retval
+xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+{
+	struct xpc_partition *part = &xpc_partitions[partid];
+	enum xpc_retval ret = xpcUnknownReason;
+	struct xpc_msg *msg = NULL;	/* stays NULL if the allocation fails */
+
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+
+	*payload = NULL;
+
+	if (xpc_part_ref(part)) {
+		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
+		xpc_part_deref(part);
+
+		if (msg != NULL) {
+			*payload = &msg->payload;
+		}
+	}
+
+	return ret;
+}
+
+
+/*
+ * Now we actually send the messages that are ready to be sent by advancing
+ * the local message queue's Put value and then sending an IPI to the
+ * recipient partition.
+ */
+static void
+xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
+{
+	struct xpc_msg *msg;
+	s64 put = initial_put + 1;
+	int send_IPI = 0;
+
+
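+	/*
+	 * Advance put past the consecutive messages flagged XPC_M_READY and
+	 * then attempt to publish the new local_GP->put value; loop in case
+	 * more messages became ready in the meantime.
+	 */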
+	while (1) {
+
+		while (1) {
+			if (put == (volatile s64) ch->w_local_GP.put) {
+				break;
+			}
+
+			msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
+			       (put % ch->local_nentries) * ch->msg_size);
+
+			if (!(msg->flags & XPC_M_READY)) {
+				break;
+			}
+
+			put++;
+		}
+
+		if (put == initial_put) {
+			/* nothing's changed */
+			break;
+		}
+
+		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
+								initial_put) {
+			/* someone else beat us to it */
+			DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
+			break;
+		}
+
+		/* we just set the new value of local_GP->put */
+
+		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
+			"channel=%d\n", put, ch->partid, ch->number);
+
+		send_IPI = 1;
+
+		/*
+		 * We need to ensure that the message referenced by
+		 * local_GP->put is not XPC_M_READY or that local_GP->put
+		 * equals w_local_GP.put, so we'll go have a look.
+		 */
+		initial_put = put;
+	}
+
+	if (send_IPI) {
+		xpc_IPI_send_msgrequest(ch);
+	}
+}
+
+
+/*
+ * Common code that does the actual sending of the message by advancing the
+ * local message queue's Put value and by sending an IPI to the partition the
+ * message is being sent to.
+ */
+static enum xpc_retval
+xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
+			xpc_notify_func func, void *key)
+{
+	enum xpc_retval ret = xpcSuccess;
+	struct xpc_notify *notify = NULL;   // >>> to keep the compiler happy!!
+	s64 put, msg_number = msg->number;
+
+
+	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
+	DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
+					msg_number % ch->local_nentries);
+	DBUG_ON(msg->flags & XPC_M_READY);
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		/* drop the reference grabbed in xpc_allocate_msg() */
+		xpc_msgqueue_deref(ch);
+		return ch->reason;
+	}
+
+	if (notify_type != 0) {
+		/*
+		 * Tell the remote side to send an ACK interrupt when the
+		 * message has been delivered.
+		 */
+		msg->flags |= XPC_M_INTERRUPT;
+
+		atomic_inc(&ch->n_to_notify);
+
+		notify = &ch->notify_queue[msg_number % ch->local_nentries];
+		notify->func = func;
+		notify->key = key;
+		(volatile u8) notify->type = notify_type;
+
+		// >>> is a mb() needed here?
+
+		if (ch->flags & XPC_C_DISCONNECTING) {
+			/*
+			 * An error occurred between our last error check and
+			 * this one. We will try to clear the type field from
+			 * the notify entry. If we succeed then
+			 * xpc_disconnect_channel() didn't already process
+			 * the notify entry.
+			 */
+			if (cmpxchg(&notify->type, notify_type, 0) ==
+								notify_type) {
+				atomic_dec(&ch->n_to_notify);
+				ret = ch->reason;
+			}
+
+			/* drop the reference grabbed in xpc_allocate_msg() */
+			xpc_msgqueue_deref(ch);
+			return ret;
+		}
+	}
+
+	msg->flags |= XPC_M_READY;
+
+	/*
+	 * The preceding store of msg->flags must occur before the following
+	 * load of ch->local_GP->put.
+	 */
+	mb();
+
+	/* see if the message is next in line to be sent, if so send it */
+
+	put = ch->local_GP->put;
+	if (put == msg_number) {
+		xpc_send_msgs(ch, put);
+	}
+
+	/* drop the reference grabbed in xpc_allocate_msg() */
+	xpc_msgqueue_deref(ch);
+	return ret;
+}
+
+
+/*
+ * Send a message previously allocated using xpc_initiate_allocate() on the
+ * specified channel connected to the specified partition.
+ *
+ * This routine will not wait for the message to be received, nor will
+ * notification be given when it does happen. Once this routine has returned
+ * the message entry allocated via xpc_initiate_allocate() is no longer
+ * accessable to the caller.
+ *
+ * This routine, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
+ *
+ * Arguments:
+ *
+ *	partid - ID of partition to which the channel is connected.
+ *	ch_number - channel # to send message on.
+ *	payload - pointer to the payload area allocated via
+ *			xpc_initiate_allocate().
+ */
+enum xpc_retval
+xpc_initiate_send(partid_t partid, int ch_number, void *payload)
+{
+	struct xpc_partition *part = &xpc_partitions[partid];
+	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
+	enum xpc_retval ret;
+
+
+	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+		partid, ch_number);
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+	DBUG_ON(msg == NULL);
+
+	ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
+
+	return ret;
+}
+
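+/*
+ * A minimal usage sketch (illustrative only; partid, ch_number, and my_data
+ * are hypothetical, and a real caller should check every return value):
+ *
+ *	void *payload;
+ *	enum xpc_retval ret;
+ *
+ *	// flags=0 blocks until an entry is free; pass XPC_NOWAIT to fail fast
+ *	ret = xpc_initiate_allocate(partid, ch_number, 0, &payload);
+ *	if (ret == xpcSuccess) {
+ *		memcpy(payload, &my_data, sizeof(my_data));
+ *		ret = xpc_initiate_send(partid, ch_number, payload);
+ *	}
+ */
+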
+
+/*
+ * Send a message previously allocated using xpc_initiate_allocate on the
+ * specified channel connected to the specified partition.
+ *
+ * This routine will not wait for the message to be sent. Once this routine
+ * has returned the message entry allocated via xpc_initiate_allocate() is no
+ * longer accessible to the caller.
+ *
+ * Once the remote end of the channel has received the message, the function
+ * passed as an argument to xpc_initiate_send_notify() will be called. This
+ * allows the sender to free up or re-use any buffers referenced by the
+ * message, but does NOT mean the message has been processed at the remote
+ * end by a receiver.
+ *
+ * If this routine returns an error, the caller's function will NOT be called.
+ *
+ * This routine, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
+ *
+ * Arguments:
+ *
+ *	partid - ID of partition to which the channel is connected.
+ *	ch_number - channel # to send message on.
+ *	payload - pointer to the payload area allocated via
+ *			xpc_initiate_allocate().
+ *	func - function to call with asynchronous notification of message
+ *		  receipt. THIS FUNCTION MUST BE NON-BLOCKING.
+ *	key - user-defined key to be passed to the function when it's called.
+ */
+enum xpc_retval
+xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
+				xpc_notify_func func, void *key)
+{
+	struct xpc_partition *part = &xpc_partitions[partid];
+	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
+	enum xpc_retval ret;
+
+
+	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+		partid, ch_number);
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+	DBUG_ON(msg == NULL);
+	DBUG_ON(func == NULL);
+
+	ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
+								func, key);
+	return ret;
+}
+
+
+static struct xpc_msg *
+xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
+{
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
+	struct xpc_msg *remote_msg, *msg;
+	u32 msg_index, nmsgs;
+	u64 msg_offset;
+	enum xpc_retval ret;
+
+
+	if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+		/* we were interrupted by a signal */
+		return NULL;
+	}
+
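+	/* msg_to_pull_sema serializes pulls so next_msg_to_pull advances consistently */
+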
+	while (get >= ch->next_msg_to_pull) {
+
+		/* pull as many messages as are ready and able to be pulled */
+
+		msg_index = ch->next_msg_to_pull % ch->remote_nentries;
+
+		DBUG_ON(ch->next_msg_to_pull >=
+					(volatile s64) ch->w_remote_GP.put);
+		nmsgs =  (volatile s64) ch->w_remote_GP.put -
+						ch->next_msg_to_pull;
+		if (msg_index + nmsgs > ch->remote_nentries) {
+			/* ignore the ones that wrap the msg queue for now */
+			nmsgs = ch->remote_nentries - msg_index;
+		}
+
+		msg_offset = msg_index * ch->msg_size;
+		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
+								msg_offset);
+		remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
+								msg_offset);
+
+		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
+				nmsgs * ch->msg_size)) != xpcSuccess) {
+
+			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
+				" msg %ld from partition %d, channel=%d, "
+				"ret=%d\n", nmsgs, ch->next_msg_to_pull,
+				ch->partid, ch->number, ret);
+
+			XPC_DEACTIVATE_PARTITION(part, ret);
+
+			up(&ch->msg_to_pull_sema);
+			return NULL;
+		}
+
+		mb();	/* >>> this may not be needed, we're not sure */
+
+		ch->next_msg_to_pull += nmsgs;
+	}
+
+	up(&ch->msg_to_pull_sema);
+
+	/* return the message we were looking for */
+	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
+	msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
+
+	return msg;
+}
+
+
+/*
+ * Get a message to be delivered.
+ */
+static struct xpc_msg *
+xpc_get_deliverable_msg(struct xpc_channel *ch)
+{
+	struct xpc_msg *msg = NULL;
+	s64 get;
+
+
+	do {
+		if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
+			break;
+		}
+
+		get = (volatile s64) ch->w_local_GP.get;
+		if (get == (volatile s64) ch->w_remote_GP.put) {
+			break;
+		}
+
+		/* There are messages waiting to be pulled and delivered.
+		 * We need to try to secure one for ourselves. We'll do this
+		 * by trying to increment w_local_GP.get and hope that no one
+		 * else beats us to it. If they do, we'll simply have to try
+		 * again for the next one.
+		 */
+
+		if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
+			/* we got the entry referenced by get */
+
+			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
+				"partid=%d, channel=%d\n", get + 1,
+				ch->partid, ch->number);
+
+			/* pull the message from the remote partition */
+
+			msg = xpc_pull_remote_msg(ch, get);
+
+			DBUG_ON(msg != NULL && msg->number != get);
+			DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
+			DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
+
+			break;
+		}
+
+	} while (1);
+
+	return msg;
+}
+
+
+/*
+ * Deliver a message to its intended recipient.
+ */
+void
+xpc_deliver_msg(struct xpc_channel *ch)
+{
+	struct xpc_msg *msg;
+
+
+	if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+
+		/*
+		 * This ref is taken to protect the payload itself from being
+		 * freed before the user is finished with it, which the user
+		 * indicates by calling xpc_initiate_received().
+		 */
+		xpc_msgqueue_ref(ch);
+
+		atomic_inc(&ch->kthreads_active);
+
+		if (ch->func != NULL) {
+			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
+				"msg_number=%ld, partid=%d, channel=%d\n",
+				(void *) msg, msg->number, ch->partid,
+				ch->number);
+
+			/* deliver the message to its intended recipient */
+			ch->func(xpcMsgReceived, ch->partid, ch->number,
+					&msg->payload, ch->key);
+
+			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
+				"msg_number=%ld, partid=%d, channel=%d\n",
+				(void *) msg, msg->number, ch->partid,
+				ch->number);
+		}
+
+		atomic_dec(&ch->kthreads_active);
+	}
+}
+
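+/*
+ * A minimal sketch of a channel callback (my_channel_func and
+ * process_payload are hypothetical user code): on xpcMsgReceived the payload
+ * remains valid until it is handed back via xpc_initiate_received().
+ *
+ *	static void
+ *	my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
+ *			void *data, void *key)
+ *	{
+ *		if (reason == xpcMsgReceived) {
+ *			process_payload(data);
+ *			xpc_initiate_received(partid, ch_number, data);
+ *		}
+ *	}
+ */
+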
+
+/*
+ * Now we actually acknowledge the messages that have been delivered and ack'd
+ * by advancing the cached remote message queue's Get value and, if requested,
+ * sending an IPI to the message sender's partition.
+ */
+static void
+xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
+{
+	struct xpc_msg *msg;
+	s64 get = initial_get + 1;
+	int send_IPI = 0;
+
+
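+	/*
+	 * Advance get past the consecutive messages flagged XPC_M_DONE and
+	 * then attempt to publish the new local_GP->get value; loop in case
+	 * more messages were marked done in the meantime.
+	 */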
+	while (1) {
+
+		while (1) {
+			if (get == (volatile s64) ch->w_local_GP.get) {
+				break;
+			}
+
+			msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
+			       (get % ch->remote_nentries) * ch->msg_size);
+
+			if (!(msg->flags & XPC_M_DONE)) {
+				break;
+			}
+
+			msg_flags |= msg->flags;
+			get++;
+		}
+
+		if (get == initial_get) {
+			/* nothing's changed */
+			break;
+		}
+
+		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
+								initial_get) {
+			/* someone else beat us to it */
+			DBUG_ON((volatile s64) ch->local_GP->get <=
+								initial_get);
+			break;
+		}
+
+		/* we just set the new value of local_GP->get */
+
+		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
+			"channel=%d\n", get, ch->partid, ch->number);
+
+		send_IPI = (msg_flags & XPC_M_INTERRUPT);
+
+		/*
+		 * We need to ensure that the message referenced by
+		 * local_GP->get is not XPC_M_DONE or that local_GP->get
+		 * equals w_local_GP.get, so we'll go have a look.
+		 */
+		initial_get = get;
+	}
+
+	if (send_IPI) {
+		xpc_IPI_send_msgrequest(ch);
+	}
+}
+
+
+/*
+ * Acknowledge receipt of a delivered message.
+ *
+ * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
+ * that sent the message.
+ *
+ * This function, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
+ *
+ * Arguments:
+ *
+ *	partid - ID of partition to which the channel is connected.
+ *	ch_number - channel # message received on.
+ *	payload - pointer to the payload area allocated via
+ *			xpc_initiate_allocate().
+ */
+void
+xpc_initiate_received(partid_t partid, int ch_number, void *payload)
+{
+	struct xpc_partition *part = &xpc_partitions[partid];
+	struct xpc_channel *ch;
+	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
+	s64 get, msg_number = msg->number;
+
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+
+	ch = &part->channels[ch_number];
+
+	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
+		(void *) msg, msg_number, ch->partid, ch->number);
+
+	DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
+					msg_number % ch->remote_nentries);
+	DBUG_ON(msg->flags & XPC_M_DONE);
+
+	msg->flags |= XPC_M_DONE;
+
+	/*
+	 * The preceding store of msg->flags must occur before the following
+	 * load of ch->local_GP->get.
+	 */
+	mb();
+
+	/*
+	 * See if this message is next in line to be acknowledged as having
+	 * been delivered.
+	 */
+	get = ch->local_GP->get;
+	if (get == msg_number) {
+		xpc_acknowledge_msgs(ch, get, msg->flags);
+	}
+
+	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
+	xpc_msgqueue_deref(ch);
+}
+

+ 1064 - 0
arch/ia64/sn/kernel/xpc_main.c

@@ -0,0 +1,1064 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition Communication (XPC) support - standard version.
+ *
+ *	XPC provides a message passing capability that crosses partition
+ *	boundaries. This module is made up of two parts:
+ *
+ *	    partition	This part detects the presence/absence of other
+ *			partitions. It provides a heartbeat and monitors
+ *			the heartbeats of other partitions.
+ *
+ *	    channel	This part manages the channels and sends/receives
+ *			messages across them to/from other partitions.
+ *
+ *	There are a couple of additional functions residing in XP, which
+ *	provide an interface to XPC for its users.
+ *
+ *
+ *	Caveats:
+ *
+ *	  . We currently have no way to determine which nasid an IPI came
+ *	    from. Thus, xpc_IPI_send() does a remote AMO write followed by
+ *	    an IPI. The AMO indicates where data is to be pulled from, so
+ *	    after the IPI arrives, the remote partition checks the AMO word.
+ *	    The IPI can actually arrive before the AMO however, so other code
+ *	    must periodically check for this case. Also, remote AMO operations
+ *	    do not reliably time out. Thus we do a remote PIO read solely to
+ *	    know whether the remote partition is down and whether we should
+ *	    stop sending IPIs to it. This remote PIO read operation is set up
+ *	    in a special nofault region so SAL knows to ignore (and cleanup)
+ *	    any errors due to the remote AMO write, PIO read, and/or PIO
+ *	    write operations.
+ *
+ *	    If/when new hardware solves this IPI problem, we should abandon
+ *	    the current approach.
+ *
+ */
+
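
To make the caveat above concrete, here is a minimal sketch of the send side
of this workaround; amo_set_bit() and send_cross_partition_ipi() are
hypothetical stand-ins for the real SN2 AMO-store and IPI-send primitives,
not names from this commit:

	/* sketch only: AMO write first, then the IPI that may outrun it */
	static void
	sketch_IPI_send(u64 *remote_amo, int bit, int nasid, int vector)
	{
		amo_set_bit(remote_amo, bit);	/* says where data is to be pulled from */
		send_cross_partition_ipi(nasid, vector);

		/*
		 * The IPI can beat the AMO write to the remote partition, so
		 * the receiver re-checks the AMO word periodically; see
		 * xpc_dropped_IPI_check() later in this file.
		 */
	}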
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/uaccess.h>
+#include "xpc.h"
+
+
+/* define two XPC debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xpc_dbg_name = {
+	.name = "xpc"
+};
+
+struct device xpc_part_dbg_subname = {
+	.bus_id = {0},		/* set to "part" at xpc_init() time */
+	.driver = &xpc_dbg_name
+};
+
+struct device xpc_chan_dbg_subname = {
+	.bus_id = {0},		/* set to "chan" at xpc_init() time */
+	.driver = &xpc_dbg_name
+};
+
+struct device *xpc_part = &xpc_part_dbg_subname;
+struct device *xpc_chan = &xpc_chan_dbg_subname;
+
+
+/* systune related variables for /proc/sys directories */
+
+static int xpc_hb_min = 1;
+static int xpc_hb_max = 10;
+
+static int xpc_hb_check_min = 10;
+static int xpc_hb_check_max = 120;
+
+static ctl_table xpc_sys_xpc_hb_dir[] = {
+	{
+		1,
+		"hb_interval",
+		&xpc_hb_interval,
+		sizeof(int),
+		0644,
+		NULL,
+		&proc_dointvec_minmax,
+		&sysctl_intvec,
+		NULL,
+		&xpc_hb_min, &xpc_hb_max
+	},
+	{
+		2,
+		"hb_check_interval",
+		&xpc_hb_check_interval,
+		sizeof(int),
+		0644,
+		NULL,
+		&proc_dointvec_minmax,
+		&sysctl_intvec,
+		NULL,
+		&xpc_hb_check_min, &xpc_hb_check_max
+	},
+	{0}
+};
+static ctl_table xpc_sys_xpc_dir[] = {
+	{
+		1,
+		"hb",
+		NULL,
+		0,
+		0555,
+		xpc_sys_xpc_hb_dir
+	},
+	{0}
+};
+static ctl_table xpc_sys_dir[] = {
+	{
+		1,
+		"xpc",
+		NULL,
+		0,
+		0555,
+		xpc_sys_xpc_dir
+	},
+	{0}
+};
+static struct ctl_table_header *xpc_sysctl;
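
Taken together, these tables surface the two tunables as
/proc/sys/xpc/hb/hb_interval and /proc/sys/xpc/hb/hb_check_interval, with
writes clamped to the min/max pairs above by proc_dointvec_minmax(); e.g.
`echo 5 > /proc/sys/xpc/hb/hb_interval` would shorten the heartbeat period.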
+
+
+/* #of IRQs received */
+static atomic_t xpc_act_IRQ_rcvd;
+
+/* IRQ handler notifies this wait queue on receipt of an IRQ */
+static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
+
+static unsigned long xpc_hb_check_timeout;
+
+/* xpc_hb_checker thread exited notification */
+static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+
+/* xpc_discovery thread exited notification */
+static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+
+
+static struct timer_list xpc_hb_timer;
+
+
+static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
+
+
+/*
+ * Notify the heartbeat check thread that an IRQ has been received.
+ */
+static irqreturn_t
+xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	atomic_inc(&xpc_act_IRQ_rcvd);
+	wake_up_interruptible(&xpc_act_IRQ_wq);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Timer to produce the heartbeat.  The timer structure's function field is
+ * already set when this is initially called.  A tunable is used to
+ * specify when the next timeout should occur.
+ */
+static void
+xpc_hb_beater(unsigned long dummy)
+{
+	xpc_vars->heartbeat++;
+
+	if (jiffies >= xpc_hb_check_timeout) {
+		wake_up_interruptible(&xpc_act_IRQ_wq);
+	}
+
+	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
+	add_timer(&xpc_hb_timer);
+}
+
+
+/*
+ * This thread is responsible for nearly all of the partition
+ * activation/deactivation.
+ */
+static int
+xpc_hb_checker(void *ignore)
+{
+	int last_IRQ_count = 0;
+	int new_IRQ_count;
+	int force_IRQ=0;
+
+
+	/* this thread was marked active by xpc_hb_init() */
+
+	daemonize(XPC_HB_CHECK_THREAD_NAME);
+
+	set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
+
+	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
+
+	while (!(volatile int) xpc_exiting) {
+
+		/* wait for IRQ or timeout */
+		(void) wait_event_interruptible(xpc_act_IRQ_wq,
+			    (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
+					jiffies >= xpc_hb_check_timeout ||
+						(volatile int) xpc_exiting));
+
+		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
+			"been received\n",
+			(int) (xpc_hb_check_timeout - jiffies),
+			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
+
+
+		/* checking of remote heartbeats is skewed by IRQ handling */
+		if (jiffies >= xpc_hb_check_timeout) {
+			dev_dbg(xpc_part, "checking remote heartbeats\n");
+			xpc_check_remote_hb();
+
+			/*
+			 * We need to periodically recheck to ensure no
+			 * IPI/AMO pairs have been missed.  That check
+			 * must always reset xpc_hb_check_timeout.
+			 */
+			force_IRQ = 1;
+		}
+
+
+		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
+		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
+			force_IRQ = 0;
+
+			dev_dbg(xpc_part, "found an IRQ to process; will be "
+				"resetting xpc_hb_check_timeout\n");
+
+			last_IRQ_count += xpc_identify_act_IRQ_sender();
+			if (last_IRQ_count < new_IRQ_count) {
+				/* retry once to help avoid missing AMO */
+				(void) xpc_identify_act_IRQ_sender();
+			}
+			last_IRQ_count = new_IRQ_count;
+
+			xpc_hb_check_timeout = jiffies +
+					   (xpc_hb_check_interval * HZ);
+		}
+	}
+
+	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
+
+
+	/* mark this thread as inactive */
+	up(&xpc_hb_checker_exited);
+	return 0;
+}
+
+
+/*
+ * This thread will attempt to discover other partitions to activate
+ * based on info provided by SAL. This new thread is short lived and
+ * will exit once discovery is complete.
+ */
+static int
+xpc_initiate_discovery(void *ignore)
+{
+	daemonize(XPC_DISCOVERY_THREAD_NAME);
+
+	xpc_discovery();
+
+	dev_dbg(xpc_part, "discovery thread is exiting\n");
+
+	/* mark this thread as inactive */
+	up(&xpc_discovery_exited);
+	return 0;
+}
+
+
+/*
+ * Establish first contact with the remote partition. This involves pulling
+ * the XPC per partition variables from the remote partition and waiting for
+ * the remote partition to pull ours.
+ */
+static enum xpc_retval
+xpc_make_first_contact(struct xpc_partition *part)
+{
+	enum xpc_retval ret;
+
+
+	while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
+		if (ret != xpcRetry) {
+			XPC_DEACTIVATE_PARTITION(part, ret);
+			return ret;
+		}
+
+		dev_dbg(xpc_chan, "waiting to make first contact with "
+			"partition %d\n", XPC_PARTID(part));
+
+		/* wait a 1/4 of a second or so */
+		set_current_state(TASK_INTERRUPTIBLE);
+		(void) schedule_timeout(0.25 * HZ);
+
+		if (part->act_state == XPC_P_DEACTIVATING) {
+			return part->reason;
+		}
+	}
+
+	return xpc_mark_partition_active(part);
+}
+
+
+/*
+ * The first kthread assigned to a newly activated partition is the one
+ * created by XPC HB, with which it calls xpc_partition_up(). XPC hangs on to
+ * that kthread until the partition is brought down, at which time that kthread
+ * returns back to XPC HB. (The return of that kthread will signify to XPC HB
+ * that XPC has dismantled all communication infrastructure for the associated
+ * partition.) This kthread becomes the channel manager for that partition.
+ *
+ * Each active partition has a channel manager, who, besides connecting and
+ * disconnecting channels, will ensure that each of the partition's connected
+ * channels has the required number of assigned kthreads to get the work done.
+ */
+static void
+xpc_channel_mgr(struct xpc_partition *part)
+{
+	while (part->act_state != XPC_P_DEACTIVATING ||
+				atomic_read(&part->nchannels_active) > 0) {
+
+		xpc_process_channel_activity(part);
+
+
+		/*
+		 * Wait until we've been requested to activate kthreads or
+		 * all of the channel's message queues have been torn down or
+		 * a signal is pending.
+		 *
+		 * channel_mgr_requests is set to 1 after the channel mgr is
+		 * awakened. This is done to prevent him from making one pass
+		 * through the loop for each request, since he will be
+		 * servicing all the requests in one pass. The reason it's set
+		 * to 1 instead of 0 is so that other kthreads will know that
+		 * the channel mgr is running and won't bother trying to wake
+		 * him up.
+		 */
+		atomic_dec(&part->channel_mgr_requests);
+		(void) wait_event_interruptible(part->channel_mgr_wq,
+				(atomic_read(&part->channel_mgr_requests) > 0 ||
+				(volatile u64) part->local_IPI_amo != 0 ||
+				((volatile u8) part->act_state ==
+							XPC_P_DEACTIVATING &&
+				atomic_read(&part->nchannels_active) == 0)));
+		atomic_set(&part->channel_mgr_requests, 1);
+
+		// >>> Does it need to wakeup periodically as well? In case we
+		// >>> miscalculated the #of kthreads to wakeup or create?
+	}
+}
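
The wake-up side of this handshake is not shown at this point in the diff; a
minimal sketch of the counterpart, matching the "set to 1 after being
awakened" convention described above, would be:

	/* sketch: only issue the wake on the 0 -> 1 transition */
	static inline void
	sketch_wakeup_channel_mgr(struct xpc_partition *part)
	{
		if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
			wake_up(&part->channel_mgr_wq);
		}
	}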
+
+
+/*
+ * When XPC HB determines that a partition has come up, it will create a new
+ * kthread and that kthread will call this function to attempt to set up the
+ * basic infrastructure used for Cross Partition Communication with the newly
+ * upped partition.
+ *
+ * The kthread that was created by XPC HB and which set up the XPC
+ * infrastructure will remain assigned to the partition until the partition
+ * goes down, at which time the kthread will tear down the XPC infrastructure
+ * and then exit.
+ *
+ * XPC HB will put the remote partition's XPC per partition specific variables
+ * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
+ * calling xpc_partition_up().
+ */
+static void
+xpc_partition_up(struct xpc_partition *part)
+{
+	DBUG_ON(part->channels != NULL);
+
+	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
+
+	if (xpc_setup_infrastructure(part) != xpcSuccess) {
+		return;
+	}
+
+	/*
+	 * The kthread that XPC HB called us with will become the
+	 * channel manager for this partition. It will not return
+	 * back to XPC HB until the partition's XPC infrastructure
+	 * has been dismantled.
+	 */
+
+	(void) xpc_part_ref(part);	/* this will always succeed */
+
+	if (xpc_make_first_contact(part) == xpcSuccess) {
+		xpc_channel_mgr(part);
+	}
+
+	xpc_part_deref(part);
+
+	xpc_teardown_infrastructure(part);
+}
+
+
+static int
+xpc_activating(void *__partid)
+{
+	partid_t partid = (u64) __partid;
+	struct xpc_partition *part = &xpc_partitions[partid];
+	unsigned long irq_flags;
+	struct sched_param param = { sched_priority: MAX_USER_RT_PRIO - 1 };
+	int ret;
+
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+
+	spin_lock_irqsave(&part->act_lock, irq_flags);
+
+	if (part->act_state == XPC_P_DEACTIVATING) {
+		part->act_state = XPC_P_INACTIVE;
+		spin_unlock_irqrestore(&part->act_lock, irq_flags);
+		part->remote_rp_pa = 0;
+		return 0;
+	}
+
+	/* indicate the thread is activating */
+	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
+	part->act_state = XPC_P_ACTIVATING;
+
+	XPC_SET_REASON(part, 0, 0);
+	spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+	dev_dbg(xpc_part, "bringing partition %d up\n", partid);
+
+	daemonize("xpc%02d", partid);
+
+	/*
+	 * This thread needs to run at a realtime priority to prevent a
+	 * significant performance degradation.
+	 */
+	ret = sched_setscheduler(current, SCHED_FIFO, &param);
+	if (ret != 0) {
+		dev_warn(xpc_part, "unable to set pid %d to a realtime "
+			"priority, ret=%d\n", current->pid, ret);
+	}
+
+	/* allow this thread and its children to run on any CPU */
+	set_cpus_allowed(current, CPU_MASK_ALL);
+
+	/*
+	 * Register the remote partition's AMOs with SAL so it can handle
+	 * and cleanup errors within that address range should the remote
+	 * partition go down. We don't unregister this range because it is
+	 * difficult to tell when outstanding writes to the remote partition
+	 * are finished and thus when it is safe to unregister. This should
+	 * not result in wasted space in the SAL xp_addr_region table because
+	 * we should get the same page for remote_amos_page_pa after module
+	 * reloads and system reboots.
+	 */
+	if (sn_register_xp_addr_region(part->remote_amos_page_pa,
+							PAGE_SIZE, 1) < 0) {
+		dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
+			"xp_addr region\n", partid);
+
+		spin_lock_irqsave(&part->act_lock, irq_flags);
+		part->act_state = XPC_P_INACTIVE;
+		XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
+		spin_unlock_irqrestore(&part->act_lock, irq_flags);
+		part->remote_rp_pa = 0;
+		return 0;
+	}
+
+	XPC_ALLOW_HB(partid, xpc_vars);
+	xpc_IPI_send_activated(part);
+
+
+	/*
+	 * xpc_partition_up() holds this thread and marks this partition as
+	 * XPC_P_ACTIVE by way of xpc_mark_partition_active().
+	 */
+	(void) xpc_partition_up(part);
+
+	xpc_mark_partition_inactive(part);
+
+	if (part->reason == xpcReactivating) {
+		/* interrupting ourselves results in activating partition */
+		xpc_IPI_send_reactivate(part);
+	}
+
+	return 0;
+}
+
+
+void
+xpc_activate_partition(struct xpc_partition *part)
+{
+	partid_t partid = XPC_PARTID(part);
+	unsigned long irq_flags;
+	pid_t pid;
+
+
+	spin_lock_irqsave(&part->act_lock, irq_flags);
+
+	pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
+
+	DBUG_ON(part->act_state != XPC_P_INACTIVE);
+
+	if (pid > 0) {
+		part->act_state = XPC_P_ACTIVATION_REQ;
+		XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
+	} else {
+		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
+	}
+
+	spin_unlock_irqrestore(&part->act_lock, irq_flags);
+}
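
As an aside, the kernel_thread()/daemonize() pairing used here and throughout
this file predates the kthread API; on later kernels the same spawn would
read roughly as follows (illustrative only, not part of this commit):

	struct task_struct *thread;

	thread = kthread_run(xpc_activating, (void *) ((u64) partid),
			     "xpc%02d", partid);
	if (IS_ERR(thread))
		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);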
+
+
+/*
+ * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
+ * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
+ * than one partition, we use an AMO_t structure per partition to indicate
+ * whether a partition has sent an IPI or not.  >>> If it has, then wake up the
+ * associated kthread to handle it.
+ *
+ * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
+ * running on other partitions.
+ *
+ * Noteworthy Arguments:
+ *
+ *	irq - Interrupt ReQuest number. NOT USED.
+ *
+ *	dev_id - partid of IPI's potential sender.
+ *
+ *	regs - processor's context before the processor entered
+ *	       interrupt code. NOT USED.
+ */
+irqreturn_t
+xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	partid_t partid = (partid_t) (u64) dev_id;
+	struct xpc_partition *part = &xpc_partitions[partid];
+
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+
+	if (xpc_part_ref(part)) {
+		xpc_check_for_channel_activity(part);
+
+		xpc_part_deref(part);
+	}
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
+ * because the write to their associated IPI amo completed after the IRQ/IPI
+ * was received.
+ */
+void
+xpc_dropped_IPI_check(struct xpc_partition *part)
+{
+	if (xpc_part_ref(part)) {
+		xpc_check_for_channel_activity(part);
+
+		part->dropped_IPI_timer.expires = jiffies +
+							XPC_P_DROPPED_IPI_WAIT;
+		add_timer(&part->dropped_IPI_timer);
+		xpc_part_deref(part);
+	}
+}
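
This routine doubles as the dropped_IPI_timer callback; a sketch of how that
timer would be armed when the partition's channel infrastructure is set up
(the cast matches the timer callback's unsigned long argument):

	init_timer(&part->dropped_IPI_timer);
	part->dropped_IPI_timer.function =
			(void (*)(unsigned long)) xpc_dropped_IPI_check;
	part->dropped_IPI_timer.data = (unsigned long) part;
	part->dropped_IPI_timer.expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
	add_timer(&part->dropped_IPI_timer);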
+
+
+void
+xpc_activate_kthreads(struct xpc_channel *ch, int needed)
+{
+	int idle = atomic_read(&ch->kthreads_idle);
+	int assigned = atomic_read(&ch->kthreads_assigned);
+	int wakeup;
+
+
+	DBUG_ON(needed <= 0);
+
+	if (idle > 0) {
+		wakeup = (needed > idle) ? idle : needed;
+		needed -= wakeup;
+
+		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
+			"channel=%d\n", wakeup, ch->partid, ch->number);
+
+		/* only wakeup the requested number of kthreads */
+		wake_up_nr(&ch->idle_wq, wakeup);
+	}
+
+	if (needed <= 0) {
+		return;
+	}
+
+	if (needed + assigned > ch->kthreads_assigned_limit) {
+		needed = ch->kthreads_assigned_limit - assigned;
+		// >>>should never be less than 0
+		if (needed <= 0) {
+			return;
+		}
+	}
+
+	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
+		needed, ch->partid, ch->number);
+
+	xpc_create_kthreads(ch, needed);
+}
+
+
+/*
+ * This function is where XPC's kthreads wait for messages to deliver.
+ */
+static void
+xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
+{
+	do {
+		/* deliver messages to their intended recipients */
+
+		while ((volatile s64) ch->w_local_GP.get <
+				(volatile s64) ch->w_remote_GP.put &&
+					!((volatile u32) ch->flags &
+						XPC_C_DISCONNECTING)) {
+			xpc_deliver_msg(ch);
+		}
+
+		if (atomic_inc_return(&ch->kthreads_idle) >
+						ch->kthreads_idle_limit) {
+			/* too many idle kthreads on this channel */
+			atomic_dec(&ch->kthreads_idle);
+			break;
+		}
+
+		dev_dbg(xpc_chan, "idle kthread calling "
+			"wait_event_interruptible_exclusive()\n");
+
+		(void) wait_event_interruptible_exclusive(ch->idle_wq,
+				((volatile s64) ch->w_local_GP.get <
+					(volatile s64) ch->w_remote_GP.put ||
+				((volatile u32) ch->flags &
+						XPC_C_DISCONNECTING)));
+
+		atomic_dec(&ch->kthreads_idle);
+
+	} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
+}
+
+
+static int
+xpc_daemonize_kthread(void *args)
+{
+	partid_t partid = XPC_UNPACK_ARG1(args);
+	u16 ch_number = XPC_UNPACK_ARG2(args);
+	struct xpc_partition *part = &xpc_partitions[partid];
+	struct xpc_channel *ch;
+	int n_needed;
+
+
+	daemonize("xpc%02dc%d", partid, ch_number);
+
+	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
+		partid, ch_number);
+
+	ch = &part->channels[ch_number];
+
+	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
+
+		/* let registerer know that connection has been established */
+
+		if (atomic_read(&ch->kthreads_assigned) == 1) {
+			xpc_connected_callout(ch);
+
+			/*
+			 * It is possible that while the callout was being
+			 * made that the remote partition sent some messages.
+			 * If that is the case, we may need to activate
+			 * additional kthreads to help deliver them. We only
+			 * need one less than total #of messages to deliver.
+			 */
+			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
+			if (n_needed > 0 &&
+					!(ch->flags & XPC_C_DISCONNECTING)) {
+				xpc_activate_kthreads(ch, n_needed);
+			}
+		}
+
+		xpc_kthread_waitmsgs(part, ch);
+	}
+
+	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
+			((ch->flags & XPC_C_CONNECTCALLOUT) ||
+				(ch->reason != xpcUnregistering &&
+					ch->reason != xpcOtherUnregistering))) {
+		xpc_disconnected_callout(ch);
+	}
+
+
+	xpc_msgqueue_deref(ch);
+
+	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
+		partid, ch_number);
+
+	xpc_part_deref(part);
+	return 0;
+}
+
+
+/*
+ * For each partition that XPC has established communications with, there is
+ * a minimum of one kernel thread assigned to perform any operation that
+ * may potentially sleep or block (basically the callouts to the asynchronous
+ * functions registered via xpc_connect()).
+ *
+ * Additional kthreads are created and destroyed by XPC as the workload
+ * demands.
+ *
+ * A kthread is assigned to one of the active channels that exists for a given
+ * partition.
+ */
+void
+xpc_create_kthreads(struct xpc_channel *ch, int needed)
+{
+	unsigned long irq_flags;
+	pid_t pid;
+	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
+
+
+	while (needed-- > 0) {
+		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
+		if (pid < 0) {
+			/* the fork failed */
+
+			if (atomic_read(&ch->kthreads_assigned) <
+						ch->kthreads_idle_limit) {
+				/*
+				 * Flag this as an error only if we have an
+				 * insufficient #of kthreads for the channel
+				 * to function.
+				 *
+				 * No xpc_msgqueue_ref() is needed here since
+				 * the channel mgr is doing this.
+				 */
+				spin_lock_irqsave(&ch->lock, irq_flags);
+				XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
+								&irq_flags);
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+			}
+			break;
+		}
+
+		/*
+		 * The following is done on behalf of the newly created
+		 * kthread. That kthread is responsible for doing the
+		 * counterpart to the following before it exits.
+		 */
+		(void) xpc_part_ref(&xpc_partitions[ch->partid]);
+		xpc_msgqueue_ref(ch);
+		atomic_inc(&ch->kthreads_assigned);
+		ch->kthreads_created++;	// >>> temporary debug only!!!
+	}
+}
+
+
+void
+xpc_disconnect_wait(int ch_number)
+{
+	partid_t partid;
+	struct xpc_partition *part;
+	struct xpc_channel *ch;
+
+
+	/* now wait for all callouts to the caller's function to cease */
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		part = &xpc_partitions[partid];
+
+		if (xpc_part_ref(part)) {
+			ch = &part->channels[ch_number];
+
+// >>> how do we keep from falling into the window between our check and going
+// >>> down and coming back up where sema is re-inited?
+			if (ch->flags & XPC_C_SETUP) {
+				(void) down(&ch->teardown_sema);
+			}
+
+			xpc_part_deref(part);
+		}
+	}
+}
+
+
+static void
+xpc_do_exit(void)
+{
+	partid_t partid;
+	int active_part_count;
+	struct xpc_partition *part;
+
+
+	/* now it's time to eliminate our heartbeat */
+	del_timer_sync(&xpc_hb_timer);
+	xpc_vars->heartbeating_to_mask = 0;
+
+	/* indicate to others that our reserved page is uninitialized */
+	xpc_rsvd_page->vars_pa = 0;
+
+	/*
+	 * Ignore all incoming interrupts. Without interrupts the heartbeat
+	 * checker won't activate any new partitions that may come up.
+	 */
+	free_irq(SGI_XPC_ACTIVATE, NULL);
+
+	/*
+	 * Cause the heartbeat checker and the discovery threads to exit.
+	 * We don't want them attempting to activate new partitions as we
+	 * try to deactivate the existing ones.
+	 */
+	xpc_exiting = 1;
+	wake_up_interruptible(&xpc_act_IRQ_wq);
+
+	/* wait for the heartbeat checker thread to mark itself inactive */
+	down(&xpc_hb_checker_exited);
+
+	/* wait for the discovery thread to mark itself inactive */
+	down(&xpc_discovery_exited);
+
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	schedule_timeout(0.3 * HZ);
+	set_current_state(TASK_RUNNING);
+
+
+	/* wait for all partitions to become inactive */
+
+	do {
+		active_part_count = 0;
+
+		for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+			part = &xpc_partitions[partid];
+			if (part->act_state != XPC_P_INACTIVE) {
+				active_part_count++;
+
+				XPC_DEACTIVATE_PARTITION(part, xpcUnloading);
+			}
+		}
+
+		if (active_part_count) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(0.3 * HZ);
+			set_current_state(TASK_RUNNING);
+		}
+
+	} while (active_part_count > 0);
+
+
+	/* close down protections for IPI operations */
+	xpc_restrict_IPI_ops();
+
+
+	/* clear the interface to XPC's functions */
+	xpc_clear_interface();
+
+	if (xpc_sysctl) {
+		unregister_sysctl_table(xpc_sysctl);
+	}
+}
+
+
+int __init
+xpc_init(void)
+{
+	int ret;
+	partid_t partid;
+	struct xpc_partition *part;
+	pid_t pid;
+
+
+	/*
+	 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
+	 * both a partition's reserved page and its XPC variables. Its size was
+	 * based on the size of a reserved page, so we need to ensure that the
+	 * XPC variables will fit as well.
+	 */
+	if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) {
+		dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
+		return -EPERM;
+	}
+	DBUG_ON((u64) xpc_remote_copy_buffer !=
+				L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
+
+	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
+	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
+
+	xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
+
+	/*
+	 * The first few fields of each entry of xpc_partitions[] need to
+	 * be initialized now so that calls to xpc_connect() and
+	 * xpc_disconnect() can be made prior to the activation of any remote
+	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
+	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
+	 * PARTITION HAS BEEN ACTIVATED.
+	 */
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		part = &xpc_partitions[partid];
+
+		DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
+
+		part->act_IRQ_rcvd = 0;
+		spin_lock_init(&part->act_lock);
+		part->act_state = XPC_P_INACTIVE;
+		XPC_SET_REASON(part, 0, 0);
+		part->setup_state = XPC_P_UNSET;
+		init_waitqueue_head(&part->teardown_wq);
+		atomic_set(&part->references, 0);
+	}
+
+	/*
+	 * Open up protections for IPI operations (and AMO operations on
+	 * Shub 1.1 systems).
+	 */
+	xpc_allow_IPI_ops();
+
+	/*
+	 * Interrupts being processed will increment this atomic variable and
+	 * awaken the heartbeat thread which will process the interrupts.
+	 */
+	atomic_set(&xpc_act_IRQ_rcvd, 0);
+
+	/*
+	 * This is safe to do before the xpc_hb_checker thread has started
+	 * because the handler simply wakes a wait queue.  If an interrupt is
+	 * received before the thread is waiting, it will not go to sleep,
+	 * but rather immediately process the interrupt.
+	 */
+	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
+							"xpc hb", NULL);
+	if (ret != 0) {
+		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
+			"errno=%d\n", -ret);
+
+		xpc_restrict_IPI_ops();
+
+		if (xpc_sysctl) {
+			unregister_sysctl_table(xpc_sysctl);
+		}
+		return -EBUSY;
+	}
+
+	/*
+	 * Fill the partition reserved page with the information needed by
+	 * other partitions to discover we are alive and establish initial
+	 * communications.
+	 */
+	xpc_rsvd_page = xpc_rsvd_page_init();
+	if (xpc_rsvd_page == NULL) {
+		dev_err(xpc_part, "could not setup our reserved page\n");
+
+		free_irq(SGI_XPC_ACTIVATE, NULL);
+		xpc_restrict_IPI_ops();
+
+		if (xpc_sysctl) {
+			unregister_sysctl_table(xpc_sysctl);
+		}
+		return -EBUSY;
+	}
+
+
+	/*
+	 * Set the beating to other partitions into motion.  This is
+	 * the last requirement for other partitions' discovery to
+	 * initiate communications with us.
+	 */
+	init_timer(&xpc_hb_timer);
+	xpc_hb_timer.function = xpc_hb_beater;
+	xpc_hb_beater(0);
+
+
+	/*
+	 * The real work-horse behind xpc.  This processes incoming
+	 * interrupts and monitors remote heartbeats.
+	 */
+	pid = kernel_thread(xpc_hb_checker, NULL, 0);
+	if (pid < 0) {
+		dev_err(xpc_part, "failed while forking hb check thread\n");
+
+		/* indicate to others that our reserved page is uninitialized */
+		xpc_rsvd_page->vars_pa = 0;
+
+		del_timer_sync(&xpc_hb_timer);
+		free_irq(SGI_XPC_ACTIVATE, NULL);
+		xpc_restrict_IPI_ops();
+
+		if (xpc_sysctl) {
+			unregister_sysctl_table(xpc_sysctl);
+		}
+		return -EBUSY;
+	}
+
+
+	/*
+	 * Startup a thread that will attempt to discover other partitions to
+	 * activate based on info provided by SAL. This new thread is short
+	 * lived and will exit once discovery is complete.
+	 */
+	pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
+	if (pid < 0) {
+		dev_err(xpc_part, "failed while forking discovery thread\n");
+
+		/* mark this new thread as a non-starter */
+		up(&xpc_discovery_exited);
+
+		xpc_do_exit();
+		return -EBUSY;
+	}
+
+
+	/* set the interface to point at XPC's functions */
+	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
+			  xpc_initiate_allocate, xpc_initiate_send,
+			  xpc_initiate_send_notify, xpc_initiate_received,
+			  xpc_initiate_partid_to_nasids);
+
+	return 0;
+}
+module_init(xpc_init);
+
+
+void __exit
+xpc_exit(void)
+{
+	xpc_do_exit();
+}
+module_exit(xpc_exit);
+
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
+MODULE_LICENSE("GPL");
+
+module_param(xpc_hb_interval, int, 0);
+MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
+		"heartbeat increments.");
+
+module_param(xpc_hb_check_interval, int, 0);
+MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
+		"heartbeat checks.");
+
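Note that both module parameters are registered with permission 0, so they
are load-time only (e.g. `insmod xpc.ko xpc_hb_interval=5`); at runtime the
same values remain adjustable via the /proc/sys/xpc/hb entries registered in
xpc_init().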

+ 984 - 0
arch/ia64/sn/kernel/xpc_partition.c

@@ -0,0 +1,984 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition Communication (XPC) partition support.
+ *
+ *	This is the part of XPC that detects the presence/absence of
+ *	other partitions. It provides a heartbeat and monitors the
+ *	heartbeats of other partitions.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sysctl.h>
+#include <linux/cache.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/addrs.h>
+#include "xpc.h"
+
+
+/* XPC is exiting flag */
+int xpc_exiting;
+
+
+/* SH_IPI_ACCESS shub register value on startup */
+static u64 xpc_sh1_IPI_access;
+static u64 xpc_sh2_IPI_access0;
+static u64 xpc_sh2_IPI_access1;
+static u64 xpc_sh2_IPI_access2;
+static u64 xpc_sh2_IPI_access3;
+
+
+/* original protection values for each node */
+u64 xpc_prot_vec[MAX_COMPACT_NODES];
+
+
+/* this partition's reserved page */
+struct xpc_rsvd_page *xpc_rsvd_page;
+
+/* this partition's XPC variables (within the reserved page) */
+struct xpc_vars *xpc_vars;
+struct xpc_vars_part *xpc_vars_part;
+
+
+/*
+ * For performance reasons, each entry of xpc_partitions[] is cacheline
+ * aligned. And xpc_partitions[] is padded with an additional entry at the
+ * end so that the last legitimate entry doesn't share its cacheline with
+ * another variable.
+ */
+struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+
+
+/*
+ * Generic buffer used to store a local copy of the remote partition's
+ * reserved page or XPC variables.
+ *
+ * xpc_discovery runs only once and is a separate thread that is
+ * very likely going to be processing in parallel with receiving
+ * interrupts.
+ */
+char ____cacheline_aligned
+		xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];
+
+
+/* systune related variables */
+int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
+int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT;
+
+
+/*
+ * Given a nasid, get the physical address of the partition's reserved page
+ * for that nasid. This function returns 0 on any error.
+ */
+static u64
+xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
+{
+	bte_result_t bte_res;
+	s64 status;
+	u64 cookie = 0;
+	u64 rp_pa = nasid;	/* seed with nasid */
+	u64 len = 0;
+
+
+	while (1) {
+
+		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
+								&len);
+
+		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
+			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
+			status, cookie, rp_pa, len);
+
+		if (status != SALRET_MORE_PASSES) {
+			break;
+		}
+
+		if (len > buf_size) {
+			dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
+			status = SALRET_ERROR;
+			break;
+		}
+
+		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
+					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+		if (bte_res != BTE_SUCCESS) {
+			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
+			status = SALRET_ERROR;
+			break;
+		}
+	}
+
+	if (status != SALRET_OK) {
+		rp_pa = 0;
+	}
+	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
+	return rp_pa;
+}
+
+
+/*
+ * Fill the partition reserved page with the information needed by
+ * other partitions to discover we are alive and establish initial
+ * communications.
+ */
+struct xpc_rsvd_page *
+xpc_rsvd_page_init(void)
+{
+	struct xpc_rsvd_page *rp;
+	AMO_t *amos_page;
+	u64 rp_pa, next_cl, nasid_array = 0;
+	int i, ret;
+
+
+	/* get the local reserved page's address */
+
+	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
+					(u64) xpc_remote_copy_buffer,
+						XPC_RSVD_PAGE_ALIGNED_SIZE);
+	if (rp_pa == 0) {
+		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
+		return NULL;
+	}
+	rp = (struct xpc_rsvd_page *) __va(rp_pa);
+
+	if (rp->partid != sn_partition_id) {
+		dev_err(xpc_part, "the reserved page's partid of %d should be "
+			"%d\n", rp->partid, sn_partition_id);
+		return NULL;
+	}
+
+	rp->version = XPC_RP_VERSION;
+
+	/*
+	 * Place the XPC variables on the cache line following the
+	 * reserved page structure.
+	 */
+	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
+	xpc_vars = (struct xpc_vars *) next_cl;
+
+	/*
+	 * Before clearing xpc_vars, see if a page of AMOs had been previously
+	 * allocated. If not we'll need to allocate one and set permissions
+	 * so that cross-partition AMOs are allowed.
+	 *
+	 * The allocated AMO page needs MCA reporting to remain disabled after
+	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
+	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
+	 * which is pointed to by the reserved page, and re-use that saved copy
+	 * on subsequent loads of XPC. This AMO page is never freed, and its
+	 * memory protections are never restricted.
+	 */
+	if ((amos_page = xpc_vars->amos_page) == NULL) {
+		amos_page = (AMO_t *) mspec_kalloc_page(0);
+		if (amos_page == NULL) {
+			dev_err(xpc_part, "can't allocate page of AMOs\n");
+			return NULL;
+		}
+
+		/*
+		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
+		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
+		 */
+		if (!enable_shub_wars_1_1()) {
+			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
+					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
+					&nasid_array);
+			if (ret != 0) {
+				dev_err(xpc_part, "can't change memory "
+					"protections\n");
+				mspec_kfree_page((unsigned long) amos_page);
+				return NULL;
+			}
+		}
+	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
+		/*
+		 * EFI's XPBOOT can also set amos_page in the reserved page,
+		 * but it happens to leave it as an uncached physical address
+		 * and we need it to be an uncached virtual, so we'll have to
+		 * convert it.
+		 */
+		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
+			dev_err(xpc_part, "previously used amos_page address "
+				"is bad = 0x%p\n", (void *) amos_page);
+			return NULL;
+		}
+		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
+	}
+
+	memset(xpc_vars, 0, sizeof(struct xpc_vars));
+
+	/*
+	 * Place the XPC per partition specific variables on the cache line
+	 * following the XPC variables structure.
+	 */
+	next_cl += XPC_VARS_ALIGNED_SIZE;
+	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
+							XP_MAX_PARTITIONS);
+	xpc_vars_part = (struct xpc_vars_part *) next_cl;
+	xpc_vars->vars_part_pa = __pa(next_cl);
+
+	xpc_vars->version = XPC_V_VERSION;
+	xpc_vars->act_nasid = cpuid_to_nasid(0);
+	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
+	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */
+
+
+	/*
+	 * Initialize the activation related AMO variables.
+	 */
+	xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
+	for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
+		xpc_IPI_init(i + XP_MAX_PARTITIONS);
+	}
+	/* export AMO page's physical address to other partitions */
+	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);
+
+	/*
+	 * This signifies to the remote partition that our reserved
+	 * page is initialized.
+	 */
+	(volatile u64) rp->vars_pa = __pa(xpc_vars);
+
+	return rp;
+}
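
For reference, the reserved-page layout this function builds is (a sketch;
the offsets follow directly from the ALIGNED_SIZE constants used above):

	/* all three pieces are cacheline aligned */
	base      = (u64) rp;				/* struct xpc_rsvd_page */
	vars      = base + XPC_RSVD_PAGE_ALIGNED_SIZE;	/* struct xpc_vars */
	vars_part = vars + XPC_VARS_ALIGNED_SIZE;	/* struct xpc_vars_part[XP_MAX_PARTITIONS] */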
+
+
+/*
+ * Change protections to allow IPI operations (and AMO operations on
+ * Shub 1.1 systems).
+ */
+void
+xpc_allow_IPI_ops(void)
+{
+	int node;
+	int nasid;
+
+
+	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+
+	if (is_shub2()) {
+		xpc_sh2_IPI_access0 =
+			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+		xpc_sh2_IPI_access1 =
+			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+		xpc_sh2_IPI_access2 =
+			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+		xpc_sh2_IPI_access3 =
+			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+								-1UL);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+								-1UL);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+								-1UL);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+								-1UL);
+		}
+
+	} else {
+		xpc_sh1_IPI_access =
+			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+								-1UL);
+
+			/*
+			 * Since the BIST collides with memory operations on
+			 * SHUB 1.1, sn_change_memprotect() cannot be used.
+			 */
+			if (enable_shub_wars_1_1()) {
+				/* open up everything */
+				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
+						GLOBAL_MMR_ADDR(nasid,
+						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
+						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+								-1UL);
+				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
+						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+								-1UL);
+			}
+		}
+	}
+}
+
+
+/*
+ * Restrict protections to disallow IPI operations (and AMO operations on
+ * Shub 1.1 systems).
+ */
+void
+xpc_restrict_IPI_ops(void)
+{
+	int node;
+	int nasid;
+
+
+	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+
+	if (is_shub2()) {
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+							xpc_sh2_IPI_access0);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+							xpc_sh2_IPI_access1);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+							xpc_sh2_IPI_access2);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+							xpc_sh2_IPI_access3);
+		}
+
+	} else {
+
+		for_each_online_node(node) {
+			nasid = cnodeid_to_nasid(node);
+			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+							xpc_sh1_IPI_access);
+
+			if (enable_shub_wars_1_1()) {
+				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
+						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+							xpc_prot_vec[node]);
+				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
+						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+							xpc_prot_vec[node]);
+			}
+		}
+	}
+}
+
+
+/*
+ * At periodic intervals, scan through all active partitions and ensure
+ * their heartbeat is still active.  If not, the partition is deactivated.
+ */
+void
+xpc_check_remote_hb(void)
+{
+	struct xpc_vars *remote_vars;
+	struct xpc_partition *part;
+	partid_t partid;
+	bte_result_t bres;
+
+
+	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		if (partid == sn_partition_id) {
+			continue;
+		}
+
+		part = &xpc_partitions[partid];
+
+		if (part->act_state == XPC_P_INACTIVE ||
+				part->act_state == XPC_P_DEACTIVATING) {
+			continue;
+		}
+
+		/* pull the remote_hb cache line */
+		bres = xp_bte_copy(part->remote_vars_pa,
+					ia64_tpa((u64) remote_vars),
+					XPC_VARS_ALIGNED_SIZE,
+					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+		if (bres != BTE_SUCCESS) {
+			XPC_DEACTIVATE_PARTITION(part,
+						xpc_map_bte_errors(bres));
+			continue;
+		}
+
+		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
+			" = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
+			remote_vars->heartbeat, part->last_heartbeat,
+			remote_vars->kdb_status,
+			remote_vars->heartbeating_to_mask);
+
+		if (((remote_vars->heartbeat == part->last_heartbeat) &&
+			(remote_vars->kdb_status == 0)) ||
+			     !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
+
+			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
+			continue;
+		}
+
+		part->last_heartbeat = remote_vars->heartbeat;
+	}
+}
+
+
+/*
+ * Get a copy of the remote partition's rsvd page.
+ *
+ * remote_rp points to a buffer that is cacheline aligned for BTE copies and
+ * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
+ */
+static enum xpc_retval
+xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
+		struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa)
+{
+	int bres, i;
+
+
+	/* get the reserved page's physical address */
+
+	*remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
+						XPC_RSVD_PAGE_ALIGNED_SIZE);
+	if (*remote_rsvd_page_pa == 0) {
+		return xpcNoRsvdPageAddr;
+	}
+
+
+	/* pull over the reserved page structure */
+
+	bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp),
+				XPC_RSVD_PAGE_ALIGNED_SIZE,
+				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	if (bres != BTE_SUCCESS) {
+		return xpc_map_bte_errors(bres);
+	}
+
+
+	if (discovered_nasids != NULL) {
+		for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
+			discovered_nasids[i] |= remote_rp->part_nasids[i];
+		}
+	}
+
+
+	/* check that the partid is for another partition */
+
+	if (remote_rp->partid < 1 ||
+				remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+		return xpcInvalidPartid;
+	}
+
+	if (remote_rp->partid == sn_partition_id) {
+		return xpcLocalPartid;
+	}
+
+
+	if (XPC_VERSION_MAJOR(remote_rp->version) !=
+					XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+		return xpcBadVersion;
+	}
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Get a copy of the remote partition's XPC variables.
+ *
+ * remote_vars points to a buffer that is cacheline aligned for BTE copies and
+ * assumed to be of size XPC_VARS_ALIGNED_SIZE.
+ */
+static enum xpc_retval
+xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
+{
+	int bres;
+
+
+	if (remote_vars_pa == 0) {
+		return xpcVarsNotSet;
+	}
+
+
+	/* pull over the cross partition variables */
+
+	bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
+				XPC_VARS_ALIGNED_SIZE,
+				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	if (bres != BTE_SUCCESS) {
+		return xpc_map_bte_errors(bres);
+	}
+
+	if (XPC_VERSION_MAJOR(remote_vars->version) !=
+					XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+		return xpcBadVersion;
+	}
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Prior code has determined the nasid which generated an IPI.  Inspect
+ * that nasid to determine if its partition needs to be activated or
+ * deactivated.
+ *
+ * A partition is considered "awaiting activation" if our partition
+ * flags indicate it is not active and it has a heartbeat.  A
+ * partition is considered "awaiting deactivation" if our partition
+ * flags indicate it is active but it has no heartbeat or it is not
+ * sending its heartbeat to us.
+ *
+ * To determine the heartbeat, the remote nasid must have a properly
+ * initialized reserved page.
+ */
+static void
+xpc_identify_act_IRQ_req(int nasid)
+{
+	struct xpc_rsvd_page *remote_rp;
+	struct xpc_vars *remote_vars;
+	u64 remote_rsvd_page_pa;
+	u64 remote_vars_pa;
+	partid_t partid;
+	struct xpc_partition *part;
+	enum xpc_retval ret;
+
+
+	/* pull over the reserved page structure */
+
+	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
+
+	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa);
+	if (ret != xpcSuccess) {
+		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
+			"which sent interrupt, reason=%d\n", nasid, ret);
+		return;
+	}
+
+	remote_vars_pa = remote_rp->vars_pa;
+	partid = remote_rp->partid;
+	part = &xpc_partitions[partid];
+
+
+	/* pull over the cross partition variables */
+
+	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+
+	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
+	if (ret != xpcSuccess) {
+
+		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
+			"which sent interrupt, reason=%d\n", nasid, ret);
+
+		XPC_DEACTIVATE_PARTITION(part, ret);
+		return;
+	}
+
+
+	part->act_IRQ_rcvd++;
+
+	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
+		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
+		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
+
+
+	if (part->act_state == XPC_P_INACTIVE) {
+
+		part->remote_rp_pa = remote_rsvd_page_pa;
+		dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n",
+			part->remote_rp_pa);
+
+		part->remote_vars_pa = remote_vars_pa;
+		dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
+			part->remote_vars_pa);
+
+		part->last_heartbeat = remote_vars->heartbeat;
+		dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
+			part->last_heartbeat);
+
+		part->remote_vars_part_pa = remote_vars->vars_part_pa;
+		dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
+			part->remote_vars_part_pa);
+
+		part->remote_act_nasid = remote_vars->act_nasid;
+		dev_dbg(xpc_part, "  remote_act_nasid = 0x%x\n",
+			part->remote_act_nasid);
+
+		part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
+		dev_dbg(xpc_part, "  remote_act_phys_cpuid = 0x%x\n",
+			part->remote_act_phys_cpuid);
+
+		part->remote_amos_page_pa = remote_vars->amos_page_pa;
+		dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
+			part->remote_amos_page_pa);
+
+		xpc_activate_partition(part);
+
+	} else if (part->remote_amos_page_pa != remote_vars->amos_page_pa ||
+			!XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
+
+		part->reactivate_nasid = nasid;
+		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+	}
+}
+
+
+/*
+ * Loop through the activation AMO variables and process any bits
+ * which are set.  Each bit indicates a nasid sending a partition
+ * activation or deactivation request.
+ *
+ * Return #of IRQs detected.
+ */
+int
+xpc_identify_act_IRQ_sender(void)
+{
+	int word, bit;
+	u64 nasid_mask;
+	u64 nasid;			/* remote nasid */
+	int n_IRQs_detected = 0;
+	AMO_t *act_amos;
+	struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+
+
+	act_amos = xpc_vars->act_amos;
+
+
+	/* scan through act AMO variable looking for non-zero entries */
+	for (word = 0; word < XP_NASID_MASK_WORDS; word++) {
+
+		nasid_mask = xpc_IPI_receive(&act_amos[word]);
+		if (nasid_mask == 0) {
+			/* no IRQs from nasids in this variable */
+			continue;
+		}
+
+		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
+			nasid_mask);
+
+
+		/*
+		 * If this nasid has been added to the machine since
+		 * our partition was reset, this will retain the
+		 * remote nasid in our reserved page's machine mask.
+		 * This is used in the event of module reload.
+		 */
+		rp->mach_nasids[word] |= nasid_mask;
+
+
+		/* locate the nasid(s) which sent interrupts */
+
+		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
+			if (nasid_mask & (1UL << bit)) {
+				n_IRQs_detected++;
+				nasid = XPC_NASID_FROM_W_B(word, bit);
+				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
+					nasid);
+				xpc_identify_act_IRQ_req(nasid);
+			}
+		}
+	}
+	return n_IRQs_detected;
+}
+
+
+/*
+ * Mark specified partition as active.
+ */
+enum xpc_retval
+xpc_mark_partition_active(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	enum xpc_retval ret;
+
+
+	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
+
+	spin_lock_irqsave(&part->act_lock, irq_flags);
+	if (part->act_state == XPC_P_ACTIVATING) {
+		part->act_state = XPC_P_ACTIVE;
+		ret = xpcSuccess;
+	} else {
+		DBUG_ON(part->reason == xpcSuccess);
+		ret = part->reason;
+	}
+	spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+	return ret;
+}
+
+
+/*
+ * Notify XPC that the partition is down.
+ */
+void
+xpc_deactivate_partition(const int line, struct xpc_partition *part,
+				enum xpc_retval reason)
+{
+	unsigned long irq_flags;
+	partid_t partid = XPC_PARTID(part);
+
+
+	spin_lock_irqsave(&part->act_lock, irq_flags);
+
+	if (part->act_state == XPC_P_INACTIVE) {
+		XPC_SET_REASON(part, reason, line);
+		spin_unlock_irqrestore(&part->act_lock, irq_flags);
+		if (reason == xpcReactivating) {
+			/* we interrupt ourselves to reactivate partition */
+			xpc_IPI_send_reactivate(part);
+		}
+		return;
+	}
+	if (part->act_state == XPC_P_DEACTIVATING) {
+		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
+					reason == xpcReactivating) {
+			XPC_SET_REASON(part, reason, line);
+		}
+		spin_unlock_irqrestore(&part->act_lock, irq_flags);
+		return;
+	}
+
+	part->act_state = XPC_P_DEACTIVATING;
+	XPC_SET_REASON(part, reason, line);
+
+	spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+	XPC_DISALLOW_HB(partid, xpc_vars);
+
+	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
+		reason);
+
+	xpc_partition_down(part, reason);
+}
+
+
+/*
+ * Mark specified partition as inactive.
+ */
+void
+xpc_mark_partition_inactive(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+
+
+	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
+		XPC_PARTID(part));
+
+	spin_lock_irqsave(&part->act_lock, irq_flags);
+	part->act_state = XPC_P_INACTIVE;
+	spin_unlock_irqrestore(&part->act_lock, irq_flags);
+	part->remote_rp_pa = 0;
+}
+
+
+/*
+ * SAL has provided a partition and machine mask.  The partition mask
+ * contains a bit for each even nasid in our partition.  The machine
+ * mask contains a bit for each even nasid in the entire machine.
+ *
+ * Using those two bit arrays, we can determine which nasids are
+ * known in the machine.  Each nasid that is available for partitioning
+ * should also have an initialized reserved page.
+ */
+void
+xpc_discovery(void)
+{
+	void *remote_rp_base;
+	struct xpc_rsvd_page *remote_rp;
+	struct xpc_vars *remote_vars;
+	u64 remote_rsvd_page_pa;
+	u64 remote_vars_pa;
+	int region;
+	int max_regions;
+	int nasid;
+	struct xpc_rsvd_page *rp;
+	partid_t partid;
+	struct xpc_partition *part;
+	u64 *discovered_nasids;
+	enum xpc_retval ret;
+
+
+	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
+						GFP_KERNEL, &remote_rp_base);
+	if (remote_rp == NULL) {
+		return;
+	}
+	remote_vars = (struct xpc_vars *) remote_rp;
+
+
+	discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
+							GFP_KERNEL);
+	if (discovered_nasids == NULL) {
+		kfree(remote_rp_base);
+		return;
+	}
+	memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);
+
+	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+
+	/*
+	 * The term 'region' in this context refers to the minimum number of
+	 * nodes that can comprise an access protection grouping. The access
+	 * protection applies to memory, IOI and IPI.
+	 */
+//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
+//>>> include/asm-ia64/sn/addrs.h
+#define SH1_MAX_REGIONS		64
+#define SH2_MAX_REGIONS		256
+	max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;
+
+	for (region = 0; region < max_regions; region++) {
+
+		if ((volatile int) xpc_exiting) {
+			break;
+		}
+
+		dev_dbg(xpc_part, "searching region %d\n", region);
+
+		for (nasid = (region * sn_region_size * 2);
+		     nasid < ((region + 1) * sn_region_size * 2);
+		     nasid += 2) {
+
+			if ((volatile int) xpc_exiting) {
+				break;
+			}
+
+			dev_dbg(xpc_part, "checking nasid %d\n", nasid);
+
+
+			if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
+				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
+					"part of the local partition; skipping "
+					"region\n", nasid);
+				break;
+			}
+
+			if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
+				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
+					"not on Numa-Link network at reset\n",
+					nasid);
+				continue;
+			}
+
+			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
+				dev_dbg(xpc_part, "Nasid %d is part of a "
+					"partition which was previously "
+					"discovered\n", nasid);
+				continue;
+			}
+
+
+			/* pull over the reserved page structure */
+
+			ret = xpc_get_remote_rp(nasid, discovered_nasids,
+					      remote_rp, &remote_rsvd_page_pa);
+			if (ret != xpcSuccess) {
+				dev_dbg(xpc_part, "unable to get reserved page "
+					"from nasid %d, reason=%d\n", nasid,
+					ret);
+
+				if (ret == xpcLocalPartid) {
+					break;
+				}
+				continue;
+			}
+
+			remote_vars_pa = remote_rp->vars_pa;
+
+			partid = remote_rp->partid;
+			part = &xpc_partitions[partid];
+
+
+			/* pull over the cross partition variables */
+
+			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
+			if (ret != xpcSuccess) {
+				dev_dbg(xpc_part, "unable to get XPC variables "
+					"from nasid %d, reason=%d\n", nasid,
+					ret);
+
+				XPC_DEACTIVATE_PARTITION(part, ret);
+				continue;
+			}
+
+			if (part->act_state != XPC_P_INACTIVE) {
+				dev_dbg(xpc_part, "partition %d on nasid %d is "
+					"already activating\n", partid, nasid);
+				break;
+			}
+
+			/*
+			 * Register the remote partition's AMOs with SAL so it
+			 * can handle and cleanup errors within that address
+			 * range should the remote partition go down. We don't
+			 * unregister this range because it is difficult to
+			 * tell when outstanding writes to the remote partition
+			 * are finished and thus when it is safe to
+			 * unregister. This should not result in wasted space
+			 * in the SAL xp_addr_region table because we should
+			 * get the same page for remote_act_amos_pa after
+			 * module reloads and system reboots.
+			 */
+			if (sn_register_xp_addr_region(
+					    remote_vars->amos_page_pa,
+							PAGE_SIZE, 1) < 0) {
+				dev_dbg(xpc_part, "partition %d failed to "
+					"register xp_addr region 0x%016lx\n",
+					partid, remote_vars->amos_page_pa);
+
+				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
+						__LINE__);
+				break;
+			}
+
+			/*
+			 * The remote nasid is valid and available.
+			 * Send an interrupt to that nasid to notify
+			 * it that we are ready to begin activation.
+			 */
+			dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
+				"nasid %d, phys_cpuid 0x%x\n",
+				remote_vars->amos_page_pa,
+				remote_vars->act_nasid,
+				remote_vars->act_phys_cpuid);
+
+			xpc_IPI_send_activate(remote_vars);
+		}
+	}
+
+	kfree(discovered_nasids);
+	kfree(remote_rp_base);
+}
+
+
+/*
+ * Given a partid, get the nasids owned by that partition from the
+ * remote partition's reserved page.
+ */
+enum xpc_retval
+xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
+{
+	struct xpc_partition *part;
+	u64 part_nasid_pa;
+	int bte_res;
+
+
+	part = &xpc_partitions[partid];
+	if (part->remote_rp_pa == 0) {
+		return xpcPartitionDown;
+	}
+
+	part_nasid_pa = part->remote_rp_pa +
+		(u64) &((struct xpc_rsvd_page *) 0)->part_nasids;
+
+	bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
+				L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
+				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+
+	return xpc_map_bte_errors(bte_res);
+}
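
The (u64) &((struct xpc_rsvd_page *) 0)->part_nasids expression above is a
hand-rolled offsetof(); with <linux/stddef.h> it could equivalently be
written as:

	part_nasid_pa = part->remote_rp_pa +
			offsetof(struct xpc_rsvd_page, part_nasids);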
+

+ 715 - 0
arch/ia64/sn/kernel/xpnet.c

@@ -0,0 +1,715 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+/*
+ * Cross Partition Network Interface (XPNET) support
+ *
+ *	XPNET provides a virtual network layered on top of the Cross
+ *	Partition communication layer.
+ *
+ *	XPNET provides direct point-to-point and broadcast-like support
+ *	for an ethernet-like device.  The ethernet broadcast medium is
+ *	replaced with a point-to-point message structure which passes
+ *	pointers to a DMA-capable block that a remote partition should
+ *	retrieve and pass to the upper level networking layer.
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/types.h>
+#include <asm/atomic.h>
+#include <asm/sn/xp.h>
+
+
+/*
+ * The message payload transferred by XPC.
+ *
+ * buf_pa is the physical address where the DMA should pull from.
+ *
+ * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
+ * cacheline boundary.  To accomplish this, we record the number of
+ * bytes from the beginning of the first cacheline to the first useful
+ * byte of the skb (leadin_ignore) and the number of bytes from the
+ * last useful byte of the skb to the end of the last cacheline
+ * (tailout_ignore).
+ *
+ * size is the number of bytes to transfer, which includes the skb->len
+ * (useful bytes of the sender's skb) plus the leadin and tailout.
+ */
+struct xpnet_message {
+	u16 version;		/* Version for this message */
+	u16 embedded_bytes;	/* #of bytes embedded in XPC message */
+	u32 magic;		/* Special number indicating this is xpnet */
+	u64 buf_pa;		/* phys address of buffer to retrieve */
+	u32 size;		/* #of bytes in buffer */
+	u8 leadin_ignore;	/* #of bytes to ignore at the beginning */
+	u8 tailout_ignore;	/* #of bytes to ignore at the end */
+	unsigned char data;	/* body of small packets */
+};
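
A sketch of how a sender would fill the alignment fields above for a given
skb (the real logic lives in the transmit path later in this file; in this
kernel era skb->tail is a plain byte pointer):

	/* sketch: widen the BTE pull region to whole cachelines */
	u64 start_addr = (u64) skb->data & ~(L1_CACHE_BYTES - 1);
	u64 end_addr   = L1_CACHE_ALIGN((u64) skb->tail);

	msg->leadin_ignore  = (u64) skb->data - start_addr;
	msg->tailout_ignore = end_addr - (u64) skb->tail;
	msg->size           = end_addr - start_addr;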
+
+/*
+ * Determine the size of our message, the cacheline aligned size,
+ * and then the number of messages we will request from XPC.
+ *
+ * XPC expects each message to exist in an individual cacheline.
+ */
+#define XPNET_MSG_SIZE		(L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
+#define XPNET_MSG_DATA_MAX	\
+		(XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
+#define XPNET_MSG_ALIGNED_SIZE	(L1_CACHE_ALIGN(XPNET_MSG_SIZE))
+#define XPNET_MSG_NENTRIES	(PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
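
For a concrete feel: assuming the 128-byte L1 cachelines and 16 KB pages
typical of sn2 ia64, XPNET_MSG_ALIGNED_SIZE comes to 128 bytes, giving an
XPNET_MSG_NENTRIES of 128 messages per page.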
+
+
+#define XPNET_MAX_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+#define XPNET_MAX_IDLE_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+
+/*
+ * Version number of XPNET implementation. XPNET can always talk to versions
+ * with the same major #, and never to versions with a different major #.
+ */
+#define _XPNET_VERSION(_major, _minor)	(((_major) << 4) | (_minor))
+#define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
+#define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)
+
+#define	XPNET_VERSION _XPNET_VERSION(1,0)		/* version 1.0 */
+#define	XPNET_VERSION_EMBED _XPNET_VERSION(1,1)		/* version 1.1 */
+#define XPNET_MAGIC	0x88786984 /* "XNET" */
+
+#define XPNET_VALID_MSG(_m)						     \
+   ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
+    && (_m->magic == XPNET_MAGIC))
+
+#define XPNET_DEVICE_NAME		"xp0"
+
+
+/*
+ * When messages are queued with xpc_send_notify, a kmalloc'd buffer
+ * of the following type is passed as a notification cookie.  When the
+ * notification function is called, we use the cookie to decide
+ * whether all outstanding message sends have completed.  The skb can
+ * then be released.
+ */
+struct xpnet_pending_msg {
+	struct list_head free_list;
+	struct sk_buff *skb;
+	atomic_t use_count;
+};
+
+/* driver specific structure pointed to by the device structure */
+struct xpnet_dev_private {
+	struct net_device_stats stats;
+};
+
+struct net_device *xpnet_device;
+
+/*
+ * When we are notified of other partitions activating, we add them to
+ * our bitmask of partitions to which we broadcast.
+ */
+static u64 xpnet_broadcast_partitions;
+/* protect above */
+static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Since the Block Transfer Engine (BTE) is being used for the transfer
+ * and it relies upon cache-line size transfers, we need to reserve at
+ * least one cache-line for head and tail alignment.  The BTE is
+ * limited to 8MB transfers.
+ *
+ * Testing has shown that changing MTU to greater than 64KB has no effect
+ * on TCP as the two sides negotiate a Max Segment Size that is limited
+ * to 64K.  Other protocols may use packets greater than this, but for
+ * now, the default is 64KB.
+ */
+#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
+/* 32KB has been determined to be the ideal default MTU */
+#define XPNET_DEF_MTU (0x8000UL)
+
+
+/*
+ * The partition id is encapsulated in the MAC address.  The following
+ * define locates the octet the partid is in.
+ */
+#define XPNET_PARTID_OCTET	1
+#define XPNET_LICENSE_OCTET	2
+
+
+/*
+ * Define the XPNET debug device structure that is to be used with dev_dbg(),
+ * dev_err(), dev_warn(), and dev_info().
+ */
+struct device_driver xpnet_dbg_name = {
+	.name = "xpnet"
+};
+
+struct device xpnet_dbg_subname = {
+	.bus_id = {0},			/* set to "" */
+	.driver = &xpnet_dbg_name
+};
+
+struct device *xpnet = &xpnet_dbg_subname;
+
+/*
+ * Packet was received by XPC and forwarded to us.
+ */
+static void
+xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
+{
+	struct sk_buff *skb;
+	bte_result_t bret;
+	struct xpnet_dev_private *priv =
+		(struct xpnet_dev_private *) xpnet_device->priv;
+
+
+	if (!XPNET_VALID_MSG(msg)) {
+		/*
+		 * Packet with a different XPC version.  Ignore.
+		 */
+		xpc_received(partid, channel, (void *) msg);
+
+		priv->stats.rx_errors++;
+
+		return;
+	}
+	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
+
+
+	/* reserve an extra cache line */
+	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
+	if (!skb) {
+		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
+			msg->size + L1_CACHE_BYTES);
+
+		xpc_received(partid, channel, (void *) msg);
+
+		priv->stats.rx_errors++;
+
+		return;
+	}
+
+	/*
+	 * The allocated skb has some reserved space.
+	 * In order to use bte_copy, we need to get the
+	 * skb->data pointer moved forward.
+	 */
+	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
+					    (L1_CACHE_BYTES - 1)) +
+			  msg->leadin_ignore));
+
+	/*
+	 * Update the tail pointer to indicate data actually
+	 * transferred.
+	 */
+	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
+
+	/*
+	 * Move the data over from the other side.
+	 */
+	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
+						(msg->embedded_bytes != 0)) {
+		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
+			"%lu)\n", skb->data, &msg->data,
+			(size_t) msg->embedded_bytes);
+
+		memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
+	} else {
+		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
+			"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
+			(void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+			msg->size);
+
+		bret = bte_copy(msg->buf_pa,
+				__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+				msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+
+		if (bret != BTE_SUCCESS) {
+			// >>> Need better way of cleaning skb.  Currently skb
+			// >>> appears in_use and we can't just call
+			// >>> dev_kfree_skb.
+			dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
+				"error=0x%x\n", (void *)msg->buf_pa,
+				(void *)__pa((u64)skb->data &
+							~(L1_CACHE_BYTES - 1)),
+				msg->size, bret);
+
+			xpc_received(partid, channel, (void *) msg);
+
+			priv->stats.rx_errors++;
+
+			return;
+		}
+	}
+
+	dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+		(void *) skb->data, (void *) skb->tail, (void *) skb->end,
+		skb->len);
+
+	skb->dev = xpnet_device;
+	skb->protocol = eth_type_trans(skb, xpnet_device);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
+		"skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
+		(void *) skb->head, (void *) skb->data, (void *) skb->tail,
+		(void *) skb->end, skb->len);
+
+
+	xpnet_device->last_rx = jiffies;
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += skb->len + ETH_HLEN;
+
+	netif_rx_ni(skb);
+	xpc_received(partid, channel, (void *) msg);
+}
+
+
+/*
+ * This is the handler which XPC calls during any sort of change in
+ * state or message reception on a connection.
+ */
+static void
+xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
+			  void *data, void *key)
+{
+	long bp;
+
+
+	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+	DBUG_ON(channel != XPC_NET_CHANNEL);
+
+	switch(reason) {
+	case xpcMsgReceived:	/* message received */
+		DBUG_ON(data == NULL);
+
+		xpnet_receive(partid, channel, (struct xpnet_message *) data);
+		break;
+
+	case xpcConnected:	/* connection completed to a partition */
+		spin_lock_bh(&xpnet_broadcast_lock);
+		xpnet_broadcast_partitions |= 1UL << (partid - 1);
+		bp = xpnet_broadcast_partitions;
+		spin_unlock_bh(&xpnet_broadcast_lock);
+
+		netif_carrier_on(xpnet_device);
+
+		dev_dbg(xpnet, "%s connection created to partition %d; "
+			"xpnet_broadcast_partitions=0x%lx\n",
+			xpnet_device->name, partid, bp);
+		break;
+
+	default:
+		spin_lock_bh(&xpnet_broadcast_lock);
+		xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
+		bp = xpnet_broadcast_partitions;
+		spin_unlock_bh(&xpnet_broadcast_lock);
+
+		if (bp == 0) {
+			netif_carrier_off(xpnet_device);
+		}
+
+		dev_dbg(xpnet, "%s disconnected from partition %d; "
+			"xpnet_broadcast_partitions=0x%lx\n",
+			xpnet_device->name, partid, bp);
+		break;
+
+	}
+}
+
+
+static int
+xpnet_dev_open(struct net_device *dev)
+{
+	enum xpc_retval ret;
+
+
+	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
+		"%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
+		XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
+		XPNET_MAX_IDLE_KTHREADS);
+
+	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
+			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
+			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
+	if (ret != xpcSuccess) {
+		dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
+			"ret=%d\n", dev->name, ret);
+
+		return -ENOMEM;
+	}
+
+	dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
+
+	return 0;
+}
+
+
+static int
+xpnet_dev_stop(struct net_device *dev)
+{
+	xpc_disconnect(XPC_NET_CHANNEL);
+
+	dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
+
+	return 0;
+}
+
+
+static int
+xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	/* 68 comes from min TCP+IP+MAC header */
+	if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
+		dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
+			"between 68 and %ld\n", dev->name, new_mtu,
+			XPNET_MAX_MTU);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+	dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
+	return 0;
+}
+
+
+/*
+ * Required for the net_device structure.
+ */
+static int
+xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
+{
+	return 0;
+}
+
+
+/*
+ * Return statistics to the caller.
+ */
+static struct net_device_stats *
+xpnet_dev_get_stats(struct net_device *dev)
+{
+	struct xpnet_dev_private *priv;
+
+
+	priv = (struct xpnet_dev_private *) dev->priv;
+
+	return &priv->stats;
+}
+
+
+/*
+ * Notification that the other end has received the message and
+ * DMA'd the skb information.  At this point, they are done with
+ * our side.  When all recipients are done processing, we
+ * release the skb and then release our pending message structure.
+ */
+static void
+xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
+			void *__qm)
+{
+	struct xpnet_pending_msg *queued_msg =
+		(struct xpnet_pending_msg *) __qm;
+
+
+	DBUG_ON(queued_msg == NULL);
+
+	dev_dbg(xpnet, "message to %d notified with reason %d\n",
+		partid, reason);
+
+	if (atomic_dec_return(&queued_msg->use_count) == 0) {
+		dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
+			(void *) queued_msg->skb->head);
+
+		dev_kfree_skb_any(queued_msg->skb);
+		kfree(queued_msg);
+	}
+}
+
+
+/*
+ * Network layer has formatted a packet (skb) and is ready to place it
+ * "on the wire".  Prepare and send an xpnet_message to all partitions
+ * which have connected with us and are targets of this packet.
+ *
+ * MAC-NOTE:  For the XPNET driver, the MAC address contains the
+ * destination partition_id.  If the destination partition id octet
+ * is 0xff, this packet is to be broadcast to all partitions.
+ */
+static int
+xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct xpnet_pending_msg *queued_msg;
+	enum xpc_retval ret;
+	struct xpnet_message *msg;
+	u64 start_addr, end_addr;
+	long dp;
+	u8 second_mac_octet;
+	partid_t dest_partid;
+	struct xpnet_dev_private *priv;
+	u16 embedded_bytes;
+
+
+	priv = (struct xpnet_dev_private *) dev->priv;
+
+
+	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+		(void *) skb->data, (void *) skb->tail, (void *) skb->end,
+		skb->len);
+
+
+	/*
+	 * The xpnet_pending_msg tracks how many outstanding
+	 * xpc_send_notifies are relying on this skb.  When none
+	 * remain, release the skb.
+	 */
+	queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
+	if (queued_msg == NULL) {
+		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
+			"packet\n", sizeof(struct xpnet_pending_msg));
+
+		priv->stats.tx_errors++;
+
+		return -ENOMEM;
+	}
+
+
+	/* get the beginning of the first cacheline and end of last */
+	start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
+	end_addr = L1_CACHE_ALIGN((u64) skb->tail);
+
+	/* calculate how many bytes to embed in the XPC message */
+	embedded_bytes = 0;
+	if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
+		/* skb->data does fit so embed */
+		embedded_bytes = skb->len;
+	}
+
+
+	/*
+	 * Since the send occurs asynchronously, we set the count to one
+	 * and begin sending.  Any sends that happen to complete before
+	 * we are done sending will not free the skb.  We will be left
+	 * with that task during exit.  This also handles the case of
+	 * a packet destined for a partition which is no longer up.
+	 */
+	atomic_set(&queued_msg->use_count, 1);
+	queued_msg->skb = skb;
+
+
+	second_mac_octet = skb->data[XPNET_PARTID_OCTET];
+	if (second_mac_octet == 0xff) {
+		/* we are being asked to broadcast to all partitions */
+		dp = xpnet_broadcast_partitions;
+	} else if (second_mac_octet != 0) {
+		dp = xpnet_broadcast_partitions &
+					(1UL << (second_mac_octet - 1));
+	} else {
+		/* 0 is an invalid partid.  Ignore */
+		dp = 0;
+	}
+	dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
+
+	/*
+	 * If we wanted to allow promiscuous mode to work like an
+	 * unswitched network, this would be a good point to OR in a
+	 * mask of partitions which should be receiving all packets.
+	 */
+
+	/*
+	 * Main send loop.
+	 */
+	for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
+	     dest_partid++) {
+
+
+		if (!(dp & (1UL << (dest_partid - 1)))) {
+			/* not destined for this partition */
+			continue;
+		}
+
+		/* remove this partition from the destinations mask */
+		dp &= ~(1UL << (dest_partid - 1));
+
+
+		/* found a partition to send to */
+
+		ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
+				   XPC_NOWAIT, (void **)&msg);
+		if (unlikely(ret != xpcSuccess)) {
+			continue;
+		}
+
+		msg->embedded_bytes = embedded_bytes;
+		if (unlikely(embedded_bytes != 0)) {
+			msg->version = XPNET_VERSION_EMBED;
+			dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
+				&msg->data, skb->data, (size_t) embedded_bytes);
+			memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
+		} else {
+			msg->version = XPNET_VERSION;
+		}
+		msg->magic = XPNET_MAGIC;
+		msg->size = end_addr - start_addr;
+		msg->leadin_ignore = (u64) skb->data - start_addr;
+		msg->tailout_ignore = end_addr - (u64) skb->tail;
+		msg->buf_pa = __pa(start_addr);
+
+		dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
+			"0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
+			"msg->tailout_ignore=%u\n", dest_partid,
+			XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+			msg->leadin_ignore, msg->tailout_ignore);
+
+
+		atomic_inc(&queued_msg->use_count);
+
+		ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
+				      xpnet_send_completed, queued_msg);
+		if (unlikely(ret != xpcSuccess)) {
+			atomic_dec(&queued_msg->use_count);
+			continue;
+		}
+
+	}
+
+	if (atomic_dec_return(&queued_msg->use_count) == 0) {
+		dev_dbg(xpnet, "no partitions to receive packet destined for "
+			"%d\n", dest_partid);
+
+
+		dev_kfree_skb(skb);
+		kfree(queued_msg);
+	}
+
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += skb->len;
+
+	return 0;
+}
+
+
+/*
+ * Deal with transmit timeouts coming from the network layer.
+ */
+static void
+xpnet_dev_tx_timeout (struct net_device *dev)
+{
+	struct xpnet_dev_private *priv;
+
+
+	priv = (struct xpnet_dev_private *) dev->priv;
+
+	priv->stats.tx_errors++;
+	return;
+}
+
+
+static int __init
+xpnet_init(void)
+{
+	int i;
+	u32 license_num;
+	int result = -ENOMEM;
+
+
+	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
+
+	/*
+	 * use ether_setup() to init the majority of our device
+	 * structure and then override the necessary pieces.
+	 */
+	xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
+				    XPNET_DEVICE_NAME, ether_setup);
+	if (xpnet_device == NULL) {
+		return -ENOMEM;
+	}
+
+	netif_carrier_off(xpnet_device);
+
+	xpnet_device->mtu = XPNET_DEF_MTU;
+	xpnet_device->change_mtu = xpnet_dev_change_mtu;
+	xpnet_device->open = xpnet_dev_open;
+	xpnet_device->get_stats = xpnet_dev_get_stats;
+	xpnet_device->stop = xpnet_dev_stop;
+	xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
+	xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
+	xpnet_device->set_config = xpnet_dev_set_config;
+
+	/*
+	 * A multicast MAC address has the LSB of its first octet set.  We
+	 * chose a first octet that leaves it clear and is unlikely to
+	 * collide with any vendor's officially issued MAC.
+	 */
+	xpnet_device->dev_addr[0] = 0xfe;
+	xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
+	license_num = sn_partition_serial_number_val();
+	for (i = 3; i >= 0; i--) {
+		xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
+							license_num & 0xff;
+		license_num = license_num >> 8;
+	}
+
+	/*
+	 * ether_setup() sets this to a multicast device.  We are
+	 * really not supporting multicast at this time.
+	 */
+	xpnet_device->flags &= ~IFF_MULTICAST;
+
+	/*
+	 * No need to checksum as it is a DMA transfer.  The BTE will
+	 * report an error if the data is not retrievable and the
+	 * packet will be dropped.
+	 */
+	xpnet_device->features = NETIF_F_NO_CSUM;
+
+	result = register_netdev(xpnet_device);
+	if (result != 0) {
+		free_netdev(xpnet_device);
+	}
+
+	return result;
+}
+module_init(xpnet_init);
+
+
+static void __exit
+xpnet_exit(void)
+{
+	dev_info(xpnet, "unregistering network device %s\n",
+		xpnet_device->name);
+
+	unregister_netdev(xpnet_device);
+
+	free_netdev(xpnet_device);
+}
+module_exit(xpnet_exit);
+
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
+MODULE_LICENSE("GPL");
+
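
The leadin_ignore/tailout_ignore bookkeeping above is just cacheline rounding around the useful bytes of the skb. A minimal userspace sketch of the same arithmetic, assuming a 128-byte L1 cacheline and made-up skb addresses:

	#include <stdio.h>

	#define L1_CACHE_BYTES	128UL	/* assumed cacheline size */
	#define CACHE_ALIGN(a)	(((a) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

	int main(void)
	{
		unsigned long data = 0x1000a4UL;	/* skb->data (made up) */
		unsigned long tail = 0x1003f0UL;	/* skb->tail (made up) */

		unsigned long start = data & ~(L1_CACHE_BYTES - 1);	/* buf_pa */
		unsigned long end = CACHE_ALIGN(tail);

		/* prints size=896 leadin_ignore=36 tailout_ignore=16 */
		printf("size=%lu leadin_ignore=%lu tailout_ignore=%lu\n",
		       end - start, data - start, end - tail);
		return 0;
	}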

+ 1 - 1
arch/ia64/sn/pci/pcibr/pcibr_dma.c

@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr)
 		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
 				  sfdl_flush_lock, flags);
 
-		p->sfdl_flush_value = 0;
+		*p->sfdl_flush_addr = 0;
 
 		/* force an interrupt. */
 		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

+ 1 - 1
arch/ia64/sn/pci/tioca_provider.c

@@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
 	ca_dmamap->cad_dma_addr = bus_addr;
 	ca_dmamap->cad_gart_size = entries;
 	ca_dmamap->cad_gart_entry = entry;
-	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
+	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
 
 	if (xio_addr % ps) {
 		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);

+ 5 - 0
arch/m68knommu/Kconfig

@@ -534,6 +534,11 @@ endchoice
 
 endmenu
 
+config ISA_DMA_API
+	bool
+	depends on !M5272
+	default y
+
 menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
 
 config PCI

+ 4 - 0
arch/mips/Kconfig

@@ -1656,3 +1656,7 @@ config GENERIC_HARDIRQS
 config GENERIC_IRQ_PROBE
 	bool
 	default y
+
+config ISA_DMA_API
+	bool
+	default y

+ 4 - 0
arch/parisc/Kconfig

@@ -45,6 +45,10 @@ config GENERIC_IRQ_PROBE
 config PM
 	bool
 
+config ISA_DMA_API
+	bool
+	default y
+
 source "init/Kconfig"
 
 

+ 4 - 0
arch/ppc/Kconfig

@@ -1079,6 +1079,10 @@ source kernel/power/Kconfig
 
 endmenu
 
+config ISA_DMA_API
+	bool
+	default y
+
 menu "Bus options"
 
 config ISA

+ 3 - 0
arch/ppc64/Kconfig

@@ -293,6 +293,9 @@ config SECCOMP
 
 endmenu
 
+config ISA_DMA_API
+	bool
+	default y
 
 menu "General setup"
 

+ 7 - 0
arch/ppc64/Makefile

@@ -56,12 +56,19 @@ LDFLAGS_vmlinux	:= -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
 CFLAGS		+= -msoft-float -pipe -mminimal-toc -mtraceback=none \
 		   -mcall-aixdesc
 
+GCC_VERSION     := $(call cc-version)
+GCC_BROKEN_VEC	:= $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
+
 ifeq ($(CONFIG_POWER4_ONLY),y)
 ifeq ($(CONFIG_ALTIVEC),y)
+ifeq ($(GCC_BROKEN_VEC),y)
 	CFLAGS += $(call cc-option,-mcpu=970)
 else
 	CFLAGS += $(call cc-option,-mcpu=power4)
 endif
+else
+	CFLAGS += $(call cc-option,-mcpu=power4)
+endif
 else
 	CFLAGS += $(call cc-option,-mtune=power4)
 endif

+ 4 - 0
arch/sh/Kconfig

@@ -693,6 +693,10 @@ config RTC_9701JE
 
 endmenu
 
+config ISA_DMA_API
+	bool
+	depends on MPC1211
+	default y
 
 menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
 

+ 14 - 14
arch/sparc/prom/memory.c

@@ -47,9 +47,9 @@ prom_sortmemlist(struct linux_mlist_v0 *thislist)
 	char *tmpaddr;
 	char *lowest;
 
-	for(i=0; thislist[i].theres_more != 0; i++) {
+	for(i=0; thislist[i].theres_more; i++) {
 		lowest = thislist[i].start_adr;
-		for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
+		for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
 			if(thislist[mitr].start_adr < lowest) {
 				lowest = thislist[mitr].start_adr;
 				swapi = mitr;
@@ -85,7 +85,7 @@ void __init prom_meminit(void)
 			prom_phys_total[iter].num_bytes = mptr->num_bytes;
 			prom_phys_total[iter].theres_more = &prom_phys_total[iter+1];
 		}
-		prom_phys_total[iter-1].theres_more = 0x0;
+		prom_phys_total[iter-1].theres_more = NULL;
 		/* Second, the total prom taken descriptors. */
 		for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0;
 		    mptr; mptr=mptr->theres_more, iter++) {
@@ -93,7 +93,7 @@ void __init prom_meminit(void)
 			prom_prom_taken[iter].num_bytes = mptr->num_bytes;
 			prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1];
 		}
-		prom_prom_taken[iter-1].theres_more = 0x0;
+		prom_prom_taken[iter-1].theres_more = NULL;
 		/* Last, the available physical descriptors. */
 		for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0;
 		    mptr; mptr=mptr->theres_more, iter++) {
@@ -101,7 +101,7 @@ void __init prom_meminit(void)
 			prom_phys_avail[iter].num_bytes = mptr->num_bytes;
 			prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1];
 		}
-		prom_phys_avail[iter-1].theres_more = 0x0;
+		prom_phys_avail[iter-1].theres_more = NULL;
 		/* Sort all the lists. */
 		prom_sortmemlist(prom_phys_total);
 		prom_sortmemlist(prom_prom_taken);
@@ -124,7 +124,7 @@ void __init prom_meminit(void)
 			prom_phys_avail[iter].theres_more =
 				&prom_phys_avail[iter+1];
 		}
-		prom_phys_avail[iter-1].theres_more = 0x0;
+		prom_phys_avail[iter-1].theres_more = NULL;
 
 		num_regs = prom_getproperty(node, "reg",
 					    (char *) prom_reg_memlist,
@@ -138,7 +138,7 @@ void __init prom_meminit(void)
 			prom_phys_total[iter].theres_more =
 				&prom_phys_total[iter+1];
 		}
-		prom_phys_total[iter-1].theres_more = 0x0;
+		prom_phys_total[iter-1].theres_more = NULL;
 
 		node = prom_getchild(prom_root_node);
 		node = prom_searchsiblings(node, "virtual-memory");
@@ -158,7 +158,7 @@ void __init prom_meminit(void)
 			prom_prom_taken[iter].theres_more =
 				&prom_prom_taken[iter+1];
 		}
-		prom_prom_taken[iter-1].theres_more = 0x0;
+		prom_prom_taken[iter-1].theres_more = NULL;
 
 		prom_sortmemlist(prom_prom_taken);
 
@@ -182,15 +182,15 @@ void __init prom_meminit(void)
 	case PROM_SUN4:
 #ifdef CONFIG_SUN4	
 		/* how simple :) */
-		prom_phys_total[0].start_adr = 0x0;
+		prom_phys_total[0].start_adr = NULL;
 		prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize);
-		prom_phys_total[0].theres_more = 0x0;
-		prom_prom_taken[0].start_adr = 0x0; 
+		prom_phys_total[0].theres_more = NULL;
+		prom_prom_taken[0].start_adr = NULL; 
 		prom_prom_taken[0].num_bytes = 0x0;
-		prom_prom_taken[0].theres_more = 0x0;
-		prom_phys_avail[0].start_adr = 0x0;
+		prom_prom_taken[0].theres_more = NULL;
+		prom_phys_avail[0].start_adr = NULL;
 		prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail);
-		prom_phys_avail[0].theres_more = 0x0;
+		prom_phys_avail[0].theres_more = NULL;
 #endif
 		break;
 

+ 1 - 1
arch/sparc/prom/sun4prom.c

@@ -151,7 +151,7 @@ struct linux_romvec * __init sun4_prom_init(void)
 	 * have more time, we can teach the penguin to say "By your
 	 * command" or "Activating turbo boost, Michael". :-)
 	 */
-	sun4_romvec->setLEDs(0x0);
+	sun4_romvec->setLEDs(NULL);
 	
 	printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n",
 		sun4_romvec->monid,

+ 3 - 3
arch/sparc64/kernel/irq.c

@@ -756,7 +756,7 @@ void handler_irq(int irq, struct pt_regs *regs)
 		clear_softint(clr_mask);
 	}
 #else
-	int should_forward = 1;
+	int should_forward = 0;
 
 	clear_softint(1 << irq);
 #endif
@@ -1007,10 +1007,10 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
 	}
 	upa_writel(tid | IMAP_VALID, imap);
 
-	while (!cpu_online(goal_cpu)) {
+	do {
 		if (++goal_cpu >= NR_CPUS)
 			goal_cpu = 0;
-	}
+	} while (!cpu_online(goal_cpu));
 
 	return goal_cpu;
 }
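
The while -> do/while change matters when goal_cpu already points at an online CPU: the old loop body never ran, so the cursor never advanced and every retargeted IRQ landed on the same CPU. A toy userspace sketch of the fixed cursor (NR_CPUS and the online mask are made up; cpu_online() is faked with a bitmask):

	#include <stdio.h>

	#define NR_CPUS 8
	static unsigned online_mask = 0x5b;	/* invented set of online CPUs */

	static int cpu_online(int cpu) { return (online_mask >> cpu) & 1; }

	static int next_cpu(int goal)
	{
		do {				/* always advances at least once */
			if (++goal >= NR_CPUS)
				goal = 0;
		} while (!cpu_online(goal));
		return goal;
	}

	int main(void)
	{
		int cpu = 0;
		for (int irq = 0; irq < 6; irq++) {
			cpu = next_cpu(cpu);	/* round-robins: 1 3 4 6 0 1 */
			printf("irq %d -> cpu %d\n", irq, cpu);
		}
		return 0;
	}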

+ 5 - 0
arch/x86_64/Kconfig

@@ -379,6 +379,11 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+# we have no ISA slots, but we do have ISA-style DMA.
+config ISA_DMA_API
+	bool
+	default y
+
 menu "Power management options"
 
 source kernel/power/Kconfig

+ 2 - 3
drivers/base/bus.c

@@ -405,9 +405,8 @@ void device_release_driver(struct device * dev)
 
 static void driver_detach(struct device_driver * drv)
 {
-	struct list_head * entry, * next;
-	list_for_each_safe(entry, next, &drv->devices) {
-		struct device * dev = container_of(entry, struct device, driver_list);
+	while (!list_empty(&drv->devices)) {
+		struct device * dev = container_of(drv->devices.next, struct device, driver_list);
 		device_release_driver(dev);
 	}
 }
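
list_for_each_safe() only survives removal of the *current* entry; device_release_driver() can unlink other devices as well, which leaves the iterator's saved "next" pointer stale. The new code sidesteps that by re-reading the list head every pass. A toy userspace sketch of the pattern (the list and the callback are made up):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int id; };

	static struct node *head;

	/* stand-in for device_release_driver(): unlinks and frees a node */
	static void release(struct node *n)
	{
		head = n->next;
		free(n);
	}

	int main(void)
	{
		for (int i = 3; i > 0; i--) {
			struct node *n = malloc(sizeof(*n));
			n->id = i;
			n->next = head;
			head = n;
		}
		while (head != NULL) {		/* re-read the head each pass */
			printf("releasing %d\n", head->id);
			release(head);
		}
		return 0;
	}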

+ 1 - 1
drivers/base/core.c

@@ -139,7 +139,7 @@ static int dev_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
 	buffer = &buffer[length];
 	buffer_size -= length;
 
-	if (dev->bus->hotplug) {
+	if (dev->bus && dev->bus->hotplug) {
 		/* have the bus specific function add its stuff */
 		retval = dev->bus->hotplug (dev, envp, num_envp, buffer, buffer_size);
 			if (retval) {

+ 1 - 1
drivers/block/Kconfig

@@ -105,7 +105,7 @@ config ATARI_SLM
 
 config BLK_DEV_XD
 	tristate "XT hard disk support"
-	depends on ISA
+	depends on ISA && ISA_DMA_API
 	help
 	  Very old 8 bit hard disk controllers used in the IBM XT computer
 	  will be supported if you say Y here.

+ 1 - 1
drivers/block/aoe/aoe.h

@@ -1,5 +1,5 @@
 /* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
-#define VERSION "6"
+#define VERSION "10"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 

+ 13 - 0
drivers/block/aoe/aoeblk.c

@@ -37,6 +37,13 @@ static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
 
 	return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
 }
+/* firmware version */
+static ssize_t aoedisk_show_fwver(struct gendisk * disk, char *page)
+{
+	struct aoedev *d = disk->private_data;
+
+	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+}
 
 static struct disk_attribute disk_attr_state = {
 	.attr = {.name = "state", .mode = S_IRUGO },
@@ -50,6 +57,10 @@ static struct disk_attribute disk_attr_netif = {
 	.attr = {.name = "netif", .mode = S_IRUGO },
 	.show = aoedisk_show_netif
 };
+static struct disk_attribute disk_attr_fwver = {
+	.attr = {.name = "firmware-version", .mode = S_IRUGO },
+	.show = aoedisk_show_fwver
+};
 
 static void
 aoedisk_add_sysfs(struct aoedev *d)
@@ -57,6 +68,7 @@ aoedisk_add_sysfs(struct aoedev *d)
 	sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr);
 	sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr);
 	sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr);
+	sysfs_create_file(&d->gd->kobj, &disk_attr_fwver.attr);
 }
 void
 aoedisk_rm_sysfs(struct aoedev *d)
@@ -64,6 +76,7 @@ aoedisk_rm_sysfs(struct aoedev *d)
 	sysfs_remove_link(&d->gd->kobj, "state");
 	sysfs_remove_link(&d->gd->kobj, "mac");
 	sysfs_remove_link(&d->gd->kobj, "netif");
+	sysfs_remove_link(&d->gd->kobj, "firmware-version");
 }
 
 static int
+ 4 - 7
drivers/block/aoe/aoedev.c

@@ -109,25 +109,22 @@ aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bu
 	spin_lock_irqsave(&devlist_lock, flags);
 
 	for (d=devlist; d; d=d->next)
-		if (d->sysminor == sysminor
-		|| memcmp(d->addr, addr, sizeof d->addr) == 0)
+		if (d->sysminor == sysminor)
 			break;
 
 	if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) {
 		spin_unlock_irqrestore(&devlist_lock, flags);
 		printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
 		return NULL;
-	}
+	} /* if newdev, (d->flags & DEVFL_UP) == 0 for below */
 
 	spin_unlock_irqrestore(&devlist_lock, flags);
 	spin_lock_irqsave(&d->lock, flags);
 
 	d->ifp = ifp;
-
-	if (d->sysminor != sysminor
-	|| (d->flags & DEVFL_UP) == 0) {
+	memcpy(d->addr, addr, sizeof d->addr);
+	if ((d->flags & DEVFL_UP) == 0) {
 		aoedev_downdev(d); /* flushes outstanding frames */
-		memcpy(d->addr, addr, sizeof d->addr);
 		d->sysminor = sysminor;
 		d->aoemajor = AOEMAJOR(sysminor);
 		d->aoeminor = AOEMINOR(sysminor);

+ 16 - 1
drivers/block/aoe/aoenet.c

@@ -7,6 +7,7 @@
 #include <linux/hdreg.h>
 #include <linux/blkdev.h>
 #include <linux/netdevice.h>
+#include <linux/moduleparam.h>
 #include "aoe.h"
 
 #define NECODES 5
@@ -26,6 +27,19 @@ enum {
 };
 
 static char aoe_iflist[IFLISTSZ];
+module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
+MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"\n");
+
+#ifndef MODULE
+static int __init aoe_iflist_setup(char *str)
+{
+	strncpy(aoe_iflist, str, IFLISTSZ);
+	aoe_iflist[IFLISTSZ - 1] = '\0';
+	return 1;
+}
+
+__setup("aoe_iflist=", aoe_iflist_setup);
+#endif
 
 int
 is_aoe_netif(struct net_device *ifp)
@@ -36,7 +50,8 @@ is_aoe_netif(struct net_device *ifp)
 	if (aoe_iflist[0] == '\0')
 		return 1;
 
-	for (p = aoe_iflist; *p; p = q + strspn(q, WHITESPACE)) {
+	p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
+	for (; *p; p = q + strspn(q, WHITESPACE)) {
 		q = p + strcspn(p, WHITESPACE);
 		if (q != p)
 			len = q - p;
+ 3 - 3
drivers/char/Kconfig

@@ -153,7 +153,7 @@ config DIGIEPCA
 
 config ESPSERIAL
 	tristate "Hayes ESP serial port support"
-	depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP
+	depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP && ISA_DMA_API
 	help
 	  This is a driver which supports Hayes ESP serial ports.  Both single
 	  port cards and multiport cards are supported.  Make sure to read
@@ -195,7 +195,7 @@ config ISI
 
 config SYNCLINK
 	tristate "Microgate SyncLink card support"
-	depends on SERIAL_NONSTANDARD && PCI
+	depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API
 	help
 	  Provides support for the SyncLink ISA and PCI multiprotocol serial
 	  adapters. These adapters support asynchronous and HDLC bit
@@ -408,7 +408,7 @@ config SGI_TIOCX
 
 config SGI_MBCS
        tristate "SGI FPGA Core Services driver support"
-       depends on (IA64_SGI_SN2 || IA64_GENERIC)
+       depends on SGI_TIOCX
       help
         If you have an SGI Altix with an attached SABrick
         say Y or M here, otherwise say N.

+ 16 - 16
drivers/char/ipmi/ipmi_si_intf.c

@@ -1617,15 +1617,15 @@ typedef struct dmi_header
 	u16	handle;
 } dmi_header_t;
 
-static int decode_dmi(dmi_header_t *dm, int intf_num)
+static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
 {
-	u8		*data = (u8 *)dm;
+	u8		__iomem *data = (u8 __iomem *)dm;
 	unsigned long  	base_addr;
 	u8		reg_spacing;
-	u8              len = dm->length;
+	u8              len = readb(&dm->length);
 	dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
 
-	ipmi_data->type = data[4];
+	ipmi_data->type = readb(&data[4]);
 
 	memcpy(&base_addr, data+8, sizeof(unsigned long));
 	if (len >= 0x11) {
@@ -1640,12 +1640,12 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
 		}
 		/* If bit 4 of byte 0x10 is set, then the lsb for the address
 		   is odd. */
-		ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
+		ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4);
 
-		ipmi_data->irq = data[0x11];
+		ipmi_data->irq = readb(&data[0x11]);
 
 		/* The top two bits of byte 0x10 hold the register spacing. */
-		reg_spacing = (data[0x10] & 0xC0) >> 6;
+		reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6;
 		switch(reg_spacing){
 		case 0x00: /* Byte boundaries */
 		    ipmi_data->offset = 1;
@@ -1673,7 +1673,7 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
 		ipmi_data->offset = 1;
 	}
 
-	ipmi_data->slave_addr = data[6];
+	ipmi_data->slave_addr = readb(&data[6]);
 
 	if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
 		dmi_data_entries++;
@@ -1687,9 +1687,9 @@ static int decode_dmi(dmi_header_t *dm, int intf_num)
 
 static int dmi_table(u32 base, int len, int num)
 {
-	u8 		  *buf;
-	struct dmi_header *dm;
-	u8 		  *data;
+	u8 		  __iomem *buf;
+	struct dmi_header __iomem *dm;
+	u8 		  __iomem *data;
 	int 		  i=1;
 	int		  status=-1;
 	int               intf_num = 0;
@@ -1702,12 +1702,12 @@ static int dmi_table(u32 base, int len, int num)
 
 	while(i<num && (data - buf) < len)
 	{
-		dm=(dmi_header_t *)data;
+		dm=(dmi_header_t __iomem *)data;
 
-		if((data-buf+dm->length) >= len)
+		if((data-buf+readb(&dm->length)) >= len)
         		break;
 
-		if (dm->type == 38) {
+		if (readb(&dm->type) == 38) {
 			if (decode_dmi(dm, intf_num) == 0) {
 				intf_num++;
 				if (intf_num >= SI_MAX_DRIVERS)
@@ -1715,8 +1715,8 @@ static int dmi_table(u32 base, int len, int num)
 			}
 		}
 
-	        data+=dm->length;
-		while((data-buf) < len && (*data || data[1]))
+	        data+=readb(&dm->length);
+		while((data-buf) < len && (readb(data)||readb(data+1)))
			data++;
 		data+=2;
 		i++;
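
These readb() conversions enforce the rule that ioremap()ed memory is only touched through accessor functions, never by plain pointer dereference; the __iomem annotation lets sparse check that statically. A rough userspace analogy under assumptions: mmio_readb() stands in for the kernel's readb(), and the table bytes are invented:

	#include <stdio.h>

	typedef unsigned char u8;

	static volatile u8 fake_table[32] = { [4] = 38 };	/* invented bytes */

	/* stand-in for readb(): every access funnels through one accessor */
	static u8 mmio_readb(const volatile u8 *addr)
	{
		return *addr;	/* the real readb() also orders the access */
	}

	int main(void)
	{
		const volatile u8 *data = fake_table;

		if (mmio_readb(&data[4]) == 38)
			printf("record type 38 (IPMI) found\n");
		return 0;
	}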

+ 1 - 1
drivers/char/ipmi/ipmi_si_sm.h

@@ -51,7 +51,7 @@ struct si_sm_io
 	/* Generic info used by the actual handling routines, the
            state machine shouldn't touch these. */
 	void *info;
-	void *addr;
+	void __iomem *addr;
 	int  regspacing;
 	int  regsize;
 	int  regshift;

+ 2 - 2
drivers/char/mbcs.c

@@ -394,7 +394,7 @@ int mbcs_open(struct inode *ip, struct file *fp)
 	return -ENODEV;
 }
 
-ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
+ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
 {
 	struct cx_dev *cx_dev = fp->private_data;
 	struct mbcs_soft *soft = cx_dev->soft;
@@ -419,7 +419,7 @@ ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
 }
 
 ssize_t
-mbcs_sram_write(struct file * fp, const char *buf, size_t len, loff_t * off)
+mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
 {
 	struct cx_dev *cx_dev = fp->private_data;
 	struct mbcs_soft *soft = cx_dev->soft;

+ 2 - 2
drivers/char/mbcs.h

@@ -543,9 +543,9 @@ struct mbcs_soft {
 };
 
 extern int mbcs_open(struct inode *ip, struct file *fp);
-extern ssize_t mbcs_sram_read(struct file *fp, char *buf, size_t len,
+extern ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
 			      loff_t * off);
-extern ssize_t mbcs_sram_write(struct file *fp, const char *buf, size_t len,
+extern ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
 			       loff_t * off);
 extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
 extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);

+ 3 - 3
drivers/char/sonypi.c

@@ -1021,11 +1021,11 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
 			ret = -EIO;
 			break;
 		}
-		if (copy_to_user((u8 *)arg, &val8, sizeof(val8)))
+		if (copy_to_user(argp, &val8, sizeof(val8)))
 			ret = -EFAULT;
 		break;
 	case SONYPI_IOCSFAN:
-		if (copy_from_user(&val8, (u8 *)arg, sizeof(val8))) {
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
 			ret = -EFAULT;
 			break;
 		}
@@ -1038,7 +1038,7 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
 			ret = -EIO;
 			break;
 		}
-		if (copy_to_user((u8 *)arg, &val8, sizeof(val8)))
+		if (copy_to_user(argp, &val8, sizeof(val8)))
 			ret = -EFAULT;
 		break;
 	default:

+ 1 - 1
drivers/mmc/Kconfig

@@ -51,7 +51,7 @@ config MMC_PXA
 
 config MMC_WBSD
 	tristate "Winbond W83L51xD SD/MMC Card Interface support"
-	depends on MMC && ISA
+	depends on MMC && ISA && ISA_DMA_API
 	help
 	  This selects the Winbond(R) W83L51xD Secure digital and
           Multimedia card Interface.

+ 5 - 5
drivers/net/Kconfig

@@ -589,7 +589,7 @@ config EL2
 
 config ELPLUS
 	tristate "3c505 \"EtherLink Plus\" support"
-	depends on NET_VENDOR_3COM && ISA
+	depends on NET_VENDOR_3COM && ISA && ISA_DMA_API
 	---help---
 	  Information about this network (Ethernet) card can be found in
 	  <file:Documentation/networking/3c505.txt>.  If you have a card of
@@ -630,7 +630,7 @@ config EL3
 
 config 3C515
 	tristate "3c515 ISA \"Fast EtherLink\""
-	depends on NET_VENDOR_3COM && (ISA || EISA)
+	depends on NET_VENDOR_3COM && (ISA || EISA) && ISA_DMA_API
 	help
 	  If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
 	  network card, say Y and read the Ethernet-HOWTO, available from
@@ -708,7 +708,7 @@ config TYPHOON
 
 config LANCE
 	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-	depends on NET_ETHERNET && ISA
+	depends on NET_ETHERNET && ISA && ISA_DMA_API
 	help
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
@@ -864,7 +864,7 @@ config NI52
 
 config NI65
 	tristate "NI6510 support"
-	depends on NET_VENDOR_RACAL && ISA
+	depends on NET_VENDOR_RACAL && ISA && ISA_DMA_API
 	help
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
@@ -1072,7 +1072,7 @@ config NE2000
 
 config ZNET
 	tristate "Zenith Z-Note support (EXPERIMENTAL)"
-	depends on NET_ISA && EXPERIMENTAL
+	depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API
 	help
 	  The Zenith Z-Note notebook computer has a built-in network
 	  (Ethernet) card, and this is the Linux driver for it. Note that the

+ 1 - 1
drivers/net/appletalk/Kconfig

@@ -13,7 +13,7 @@ config DEV_APPLETALK
 
 config LTPC
 	tristate "Apple/Farallon LocalTalk PC support"
-	depends on DEV_APPLETALK && (ISA || EISA)
+	depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
 	help
 	  This allows you to use the AppleTalk PC card to connect to LocalTalk
 	  networks. The card is also known as the Farallon PhoneNet PC card.

+ 2 - 2
drivers/net/hamradio/Kconfig

@@ -45,7 +45,7 @@ config BPQETHER
 
 config DMASCC
 	tristate "High-speed (DMA) SCC driver for AX.25"
-	depends on ISA && AX25 && BROKEN_ON_SMP
+	depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
 	---help---
 	  This is a driver for high-speed SCC boards, i.e. those supporting
 	  DMA on one port. You usually use those boards to connect your
@@ -78,7 +78,7 @@ config DMASCC
 
 config SCC
 	tristate "Z8530 SCC driver"
-	depends on ISA && AX25
+	depends on ISA && AX25 && ISA_DMA_API
 	---help---
 	  These cards are used to connect your Linux box to an amateur radio
 	  in order to communicate with other computers. If you want to use

+ 5 - 5
drivers/net/irda/Kconfig

@@ -310,7 +310,7 @@ config SIGMATEL_FIR
 
 config NSC_FIR
 	tristate "NSC PC87108/PC87338"
-	depends on IRDA
+	depends on IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build support for the NSC PC87108 and
 	  PC87338 IrDA chipsets.  This driver supports SIR,
@@ -321,7 +321,7 @@ config NSC_FIR
 
 config WINBOND_FIR
 	tristate "Winbond W83977AF (IR)"
-	depends on IRDA
+	depends on IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build IrDA support for the Winbond
 	  W83977AF super-io chipset.  This driver should be used for the IrDA
@@ -347,7 +347,7 @@ config AU1000_FIR
 
 config SMC_IRCC_FIR
 	tristate "SMSC IrCC (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && IRDA
+	depends on EXPERIMENTAL && IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build support for the SMC Infrared
 	  Communications Controller.  It is used in a wide variety of
@@ -357,7 +357,7 @@ config SMC_IRCC_FIR
 
 config ALI_FIR
 	tristate "ALi M5123 FIR (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && IRDA
+	depends on EXPERIMENTAL && IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build support for the ALi M5123 FIR
 	  Controller.  The ALi M5123 FIR Controller is embedded in ALi M1543C,
@@ -385,7 +385,7 @@ config SA1100_FIR
 
 config VIA_FIR
 	tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
-	depends on IRDA
+	depends on IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build support for the VIA VT8231
 	  and VIA VT1211 IrDA controllers, found on the motherboards using

+ 2 - 4
drivers/net/ppp_deflate.c

@@ -87,8 +87,7 @@ static void z_comp_free(void *arg)
 
 	if (state) {
 		zlib_deflateEnd(&state->strm);
-		if (state->strm.workspace)
-			vfree(state->strm.workspace);
+		vfree(state->strm.workspace);
 		kfree(state);
 	}
 }
@@ -308,7 +307,7 @@ static void z_decomp_free(void *arg)
 
 	if (state) {
 		zlib_inflateEnd(&state->strm);
-		if (state->strm.workspace)
-			kfree(state->strm.workspace);
+		kfree(state->strm.workspace);
 		kfree(state);
 	}
 }
+ 4 - 8
drivers/net/ppp_generic.c

@@ -2467,14 +2467,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
 	skb_queue_purge(&ppp->mrq);
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-	if (ppp->pass_filter) {
-		kfree(ppp->pass_filter);
-		ppp->pass_filter = NULL;
-	}
-	if (ppp->active_filter) {
-		kfree(ppp->active_filter);
-		ppp->active_filter = NULL;
-	}
+	kfree(ppp->pass_filter);
+	ppp->pass_filter = NULL;
+	kfree(ppp->active_filter);
+	ppp->active_filter = NULL;
 #endif /* CONFIG_PPP_FILTER */
 
 	kfree(ppp);

+ 3 - 3
drivers/net/wan/Kconfig

@@ -26,7 +26,7 @@ config WAN
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
 	tristate "Comtrol Hostess SV-11 support"
-	depends on WAN && ISA && m
+	depends on WAN && ISA && m && ISA_DMA_API
 	help
 	  Driver for Comtrol Hostess SV-11 network card which
 	  operates on low speed synchronous serial links at up to
@@ -38,7 +38,7 @@ config HOSTESS_SV11
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
 	tristate "COSA/SRP sync serial boards support"
-	depends on WAN && ISA && m
+	depends on WAN && ISA && m && ISA_DMA_API
 	---help---
 	  Driver for COSA and SRP synchronous serial boards.
 
@@ -127,7 +127,7 @@ config LANMEDIA
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
 	tristate "Sealevel Systems 4021 support"
-	depends on WAN && ISA && m
+	depends on WAN && ISA && m && ISA_DMA_API
 	help
 	  This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
 

+ 2 - 6
drivers/net/wan/cycx_x25.c

@@ -436,9 +436,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
 	}
 
 	if (err) {
-		if (chan->local_addr)
-			kfree(chan->local_addr);
-
+		kfree(chan->local_addr);
 		kfree(chan);
 		return err;
 	}
@@ -458,9 +456,7 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
 		struct cycx_x25_channel *chan = dev->priv;
 
 		if (chan->svc) {
-			if (chan->local_addr)
-				kfree(chan->local_addr);
-
+			kfree(chan->local_addr);
 			if (chan->state == WAN_CONNECTED)
 				del_timer(&chan->timer);
 		}

+ 11 - 16
drivers/net/wan/pc300_tty.c

@@ -400,10 +400,8 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
 		cpc_tty->buf_rx.last = NULL;
 	}
 	
-	if (cpc_tty->buf_tx) {
-		kfree(cpc_tty->buf_tx);
-		cpc_tty->buf_tx = NULL;
-	}
+	kfree(cpc_tty->buf_tx);
+	cpc_tty->buf_tx = NULL;
 
 	CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
 	
@@ -666,7 +664,7 @@ static void cpc_tty_rx_work(void * data)
 	unsigned long port;
 	int i, j;
 	st_cpc_tty_area *cpc_tty; 
-	volatile st_cpc_rx_buf * buf;
+	volatile st_cpc_rx_buf *buf;
 	char flags=0,flg_rx=1; 
 	struct tty_ldisc *ld;
 
@@ -680,9 +678,9 @@ static void cpc_tty_rx_work(void * data)
 			cpc_tty = &cpc_tty_area[port];
 		
 			if ((buf=cpc_tty->buf_rx.first) != 0) {
-				if(cpc_tty->tty) {
+				if (cpc_tty->tty) {
 					ld = tty_ldisc_ref(cpc_tty->tty);
-					if(ld) {
+					if (ld) {
 						if (ld->receive_buf) {
 							CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
 							ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
@@ -691,7 +689,7 @@ static void cpc_tty_rx_work(void * data)
 					}
 				}	
 				cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
-				kfree((unsigned char *)buf);
+				kfree(buf);
 				buf = cpc_tty->buf_rx.first;
 				flg_rx = 1;
 			}
@@ -733,7 +731,7 @@ static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
 
 void cpc_tty_receive(pc300dev_t *pc300dev)
 {
-	st_cpc_tty_area    *cpc_tty; 
+	st_cpc_tty_area *cpc_tty; 
 	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; 
 	pc300_t *card = (pc300_t *)pc300chan->card; 
 	int ch = pc300chan->channel; 
@@ -742,7 +740,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 	int rx_len, rx_aux; 
 	volatile unsigned char status; 
 	unsigned short first_bd = pc300chan->rx_first_bd;
-	st_cpc_rx_buf	*new=NULL;
+	st_cpc_rx_buf *new = NULL;
 	unsigned char dsr_rx;
 
 	if (pc300dev->cpc_tty == NULL) { 
@@ -762,7 +760,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 			if (status & DST_EOM) {
 				break;
 			}
-			ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
+			ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
 		}
 			
 		if (!rx_len) { 
@@ -771,10 +769,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 				cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), 
 						RX_BD_ADDR(ch, pc300chan->rx_last_bd)); 
 			}
-			if (new) {
-				kfree(new);
-				new = NULL;
-			}
+			kfree(new);
 			return; 
 		}
 		
@@ -787,7 +782,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 			continue;
 		} 
 		
-		new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
+		new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
 		if (new == 0) {
 			cpc_tty_rx_disc_frame(pc300chan);
 			continue;

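The kfree() conversions above (and the matching sdla_chdlc.c and x25_asy.c hunks below) are safe because kfree(NULL) is defined to be a no-op, so the surrounding NULL checks were redundant. A minimal userspace sketch of the idiom, using free(), which carries the same NULL guarantee; struct chan and the function names are illustrative, not taken from the drivers:

	#include <stdlib.h>

	struct chan { char *buf_tx; };

	/* Pre-patch shape: the NULL test duplicates what free()/kfree()
	 * already does internally. */
	static void close_verbose(struct chan *c)
	{
		if (c->buf_tx) {
			free(c->buf_tx);
			c->buf_tx = NULL;
		}
	}

	/* Post-patch shape: identical behavior.  Resetting the pointer still
	 * matters so a second close cannot double-free. */
	static void close_terse(struct chan *c)
	{
		free(c->buf_tx);	/* free(NULL) is a defined no-op */
		c->buf_tx = NULL;
	}
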
+ 4 - 9
drivers/net/wan/sdla_chdlc.c

@@ -3664,15 +3664,10 @@ static void wanpipe_tty_close(struct tty_struct *tty, struct file * filp)
 		chdlc_disable_comm_shutdown(card);
 		unlock_adapter_irq(&card->wandev.lock,&smp_flags);

-		if (card->tty_buf){
-			kfree(card->tty_buf);
-			card->tty_buf=NULL;			
-		}
-
-		if (card->tty_rx){
-			kfree(card->tty_rx);
-			card->tty_rx=NULL;
-		}
+		kfree(card->tty_buf);
+		card->tty_buf = NULL;			
+		kfree(card->tty_rx);
+		card->tty_rx = NULL;
 	}
 	return;
 }

+ 6 - 14
drivers/net/wan/x25_asy.c

@@ -107,13 +107,9 @@ static struct x25_asy *x25_asy_alloc(void)
 static void x25_asy_free(struct x25_asy *sl)
 {
 	/* Free all X.25 frame buffers. */
-	if (sl->rbuff)  {
-		kfree(sl->rbuff);
-	}
+	kfree(sl->rbuff);
 	sl->rbuff = NULL;
-	if (sl->xbuff)  {
-		kfree(sl->xbuff);
-	}
+	kfree(sl->xbuff);
 	sl->xbuff = NULL;

 	if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {
@@ -134,10 +130,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 	{
 		printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
 		       dev->name);
-		if (xbuff != NULL)  
-			kfree(xbuff);
-		if (rbuff != NULL)  
-			kfree(rbuff);
+		kfree(xbuff);
+		kfree(rbuff);
 		return -ENOMEM;
 	}

@@ -169,10 +163,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)

 	spin_unlock_bh(&sl->lock);

-	if (xbuff != NULL) 
-		kfree(xbuff);
-	if (rbuff != NULL)
-		kfree(rbuff);
+	kfree(xbuff);
+	kfree(rbuff);
 	return 0;
 }
 

+ 1 - 1
drivers/parport/Kconfig

@@ -34,7 +34,7 @@ config PARPORT

 config PARPORT_PC
 	tristate "PC-style hardware"
-	depends on PARPORT && (!SPARC64 || PCI) && (!SPARC32 || BROKEN)
+	depends on PARPORT && (!SPARC64 || PCI) && !SPARC32
 	---help---
 	  You should say Y here if you have a PC-style parallel port. All
 	  IBM PC compatible computers and some Alphas have PC-style

+ 24 - 10
drivers/parport/parport_pc.c

@@ -67,6 +67,10 @@

 #define PARPORT_PC_MAX_PORTS PARPORT_MAX

+#ifdef CONFIG_ISA_DMA_API
+#define HAS_DMA
+#endif
+
 /* ECR modes */
 #define ECR_SPP 00
 #define ECR_PS2 01
@@ -610,6 +614,7 @@ dump_parport_state ("leave fifo_write_block_pio", port);
 	return length - left;
 }

+#ifdef HAS_DMA
 static size_t parport_pc_fifo_write_block_dma (struct parport *port,
 					       const void *buf, size_t length)
 {
@@ -732,6 +737,17 @@ dump_parport_state ("enter fifo_write_block_dma", port);
 dump_parport_state ("leave fifo_write_block_dma", port);
 	return length - left;
 }
+#endif
+
+static inline size_t parport_pc_fifo_write_block(struct parport *port,
+					       const void *buf, size_t length)
+{
+#ifdef HAS_DMA
+	if (port->dma != PARPORT_DMA_NONE)
+		return parport_pc_fifo_write_block_dma (port, buf, length);
+#endif
+	return parport_pc_fifo_write_block_pio (port, buf, length);
+}

 /* Parallel Port FIFO mode (ECP chipsets) */
 static size_t parport_pc_compat_write_block_pio (struct parport *port,
@@ -758,10 +774,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
 	port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

 	/* Write the data to the FIFO. */
-	if (port->dma != PARPORT_DMA_NONE)
-		written = parport_pc_fifo_write_block_dma (port, buf, length);
-	else
-		written = parport_pc_fifo_write_block_pio (port, buf, length);
+	written = parport_pc_fifo_write_block(port, buf, length);

 	/* Finish up. */
 	/* For some hardware we don't want to touch the mode until
@@ -856,10 +869,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
 	port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

 	/* Write the data to the FIFO. */
-	if (port->dma != PARPORT_DMA_NONE)
-		written = parport_pc_fifo_write_block_dma (port, buf, length);
-	else
-		written = parport_pc_fifo_write_block_pio (port, buf, length);
+	written = parport_pc_fifo_write_block(port, buf, length);

 	/* Finish up. */
 	/* For some hardware we don't want to touch the mode until
@@ -2285,6 +2295,7 @@ struct parport *parport_pc_probe_port (unsigned long int base,
 		}

 #ifdef CONFIG_PARPORT_PC_FIFO
+#ifdef HAS_DMA
 		if (p->dma != PARPORT_DMA_NONE) {
 			if (request_dma (p->dma, p->name)) {
 				printk (KERN_WARNING "%s: dma %d in use, "
@@ -2306,7 +2317,8 @@ struct parport *parport_pc_probe_port (unsigned long int base,
 				}
 			}
 		}
-#endif /* CONFIG_PARPORT_PC_FIFO */
+#endif
+#endif
 	}

 	/* Done probing.  Now put the port into a sensible start-up state. */
@@ -2367,11 +2379,13 @@ void parport_pc_unregister_port (struct parport *p)
 	if (p->modes & PARPORT_MODE_ECP)
 		release_region(p->base_hi, 3);
 #ifdef CONFIG_PARPORT_PC_FIFO
+#ifdef HAS_DMA
 	if (priv->dma_buf)
 		pci_free_consistent(priv->dev, PAGE_SIZE,
 				    priv->dma_buf,
 				    priv->dma_handle);
-#endif /* CONFIG_PARPORT_PC_FIFO */
+#endif
+#endif
 	kfree (p->private_data);
 	parport_put_port(p);
 	kfree (ops); /* hope no-one cached it */

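The parport_pc.c hunks show a common compile-time/runtime split: derive a local HAS_DMA macro from CONFIG_ISA_DMA_API, build the DMA path only when the API exists, and hide the runtime choice behind one inline wrapper so both call sites lose their if/else. A self-contained sketch of the same shape; CONFIG_FAST_PATH, fast_path() and slow_path() are made-up stand-ins:

	#include <stddef.h>

	#define CONFIG_FAST_PATH 1	/* pretend Kconfig set this; drop it for a fallback-only build */

	#ifdef CONFIG_FAST_PATH
	#define HAS_FAST_PATH
	#endif

	#ifdef HAS_FAST_PATH
	static size_t fast_path(const void *buf, size_t len) { (void)buf; return len; }
	#endif

	static size_t slow_path(const void *buf, size_t len) { (void)buf; return len; }

	/* Callers use one entry point; the #ifdef lives here and nowhere else. */
	static inline size_t write_block(int use_fast, const void *buf, size_t len)
	{
	#ifdef HAS_FAST_PATH
		if (use_fast)
			return fast_path(buf, len);
	#endif
		(void)use_fast;
		return slow_path(buf, len);	/* always-available fallback */
	}
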
+ 1 - 1
drivers/pci/hotplug/ibmphp.h

@@ -196,7 +196,7 @@ struct ebda_hpc_bus {


 /********************************************************************
-*   THREE TYPE OF HOT PLUG CONTROLER                                *
+*   THREE TYPE OF HOT PLUG CONTROLLER                                *
 ********************************************************************/

 struct isa_ctlr_access {

+ 3 - 3
drivers/pci/hotplug/ibmphp_hpc.c

@@ -64,7 +64,7 @@ static int to_debug = FALSE;
 #define WPG_I2C_OR		0x2000	// I2C OR operation

 //----------------------------------------------------------------------------
-// Command set for I2C Master Operation Setup Regisetr
+// Command set for I2C Master Operation Setup Register
 //----------------------------------------------------------------------------
 #define WPG_READATADDR_MASK	0x00010000	// read,bytes,I2C shifted,index
 #define WPG_WRITEATADDR_MASK	0x40010000	// write,bytes,I2C shifted,index
@@ -835,7 +835,7 @@ static void poll_hpc (void)
 		if (ibmphp_shutdown) 
 			break;
 		
-		/* try to get the lock to do some kind of harware access */
+		/* try to get the lock to do some kind of hardware access */
 		down (&semOperations);

 		switch (poll_state) {
@@ -906,7 +906,7 @@ static void poll_hpc (void)
 				poll_state = POLL_LATCH_REGISTER;
 			break;
 		}	
-		/* give up the harware semaphore */
+		/* give up the hardware semaphore */
 		up (&semOperations);
 		/* sleep for a short time just for good measure */
 		msleep(100);

+ 5 - 2
drivers/pci/hotplug/ibmphp_pci.c

@@ -1308,10 +1308,10 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function)
 			/* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */
 		} else {
 			/* This is Memory */
-			start_address &= PCI_BASE_ADDRESS_MEM_MASK;
 			if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
 				/* pfmem */
 				debug ("start address of pfmem is %x\n", start_address);
+				start_address &= PCI_BASE_ADDRESS_MEM_MASK;

 				if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
 					err ("cannot find corresponding PFMEM resource to remove\n");
@@ -1325,6 +1325,8 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function)
 			} else {
 				/* regular memory */
 				debug ("start address of mem is %x\n", start_address);
+				start_address &= PCI_BASE_ADDRESS_MEM_MASK;
+
 				if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
 					err ("cannot find corresponding MEM resource to remove\n");
 					return -EIO;
@@ -1422,9 +1424,9 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
 			/* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */
 		} else {
 			/* This is Memory */
-			start_address &= PCI_BASE_ADDRESS_MEM_MASK;
 			if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
 				/* pfmem */
+				start_address &= PCI_BASE_ADDRESS_MEM_MASK;
 				if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
 					err ("cannot find corresponding PFMEM resource to remove\n");
 					return -EINVAL;
@@ -1436,6 +1438,7 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
 				}
 			} else {
 				/* regular memory */
+				start_address &= PCI_BASE_ADDRESS_MEM_MASK;
 				if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
 					err ("cannot find corresponding MEM resource to remove\n");
 					return -EINVAL;

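The reordering in these ibmphp_pci.c hunks fixes a real logic bug: PCI_BASE_ADDRESS_MEM_MASK is ~0x0fUL while PCI_BASE_ADDRESS_MEM_PREFETCH is bit 3 (0x08), so masking first cleared the very bit the next test inspects, and the pfmem branch was unreachable. A compilable sketch of the before/after ordering; the BAR value is an arbitrary example:

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_BASE_ADDRESS_MEM_PREFETCH	0x08UL
	#define PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)

	int main(void)
	{
		uint32_t start_address = 0xfebf0008;	/* prefetchable memory BAR */

		/* Buggy order:
		 *	start_address &= PCI_BASE_ADDRESS_MEM_MASK;          <- clears bit 3
		 *	if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH)   <- never true
		 * Fixed order: test the flag first, mask afterwards. */
		if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH)
			printf("pfmem at 0x%x\n",
			       (unsigned)(start_address & PCI_BASE_ADDRESS_MEM_MASK));
		else
			printf("mem at 0x%x\n",
			       (unsigned)(start_address & PCI_BASE_ADDRESS_MEM_MASK));
		return 0;
	}
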
+ 1 - 1
drivers/pci/hotplug/pci_hotplug.h

@@ -150,7 +150,7 @@ struct hotplug_slot_info {
  * @name: the name of the slot being registered.  This string must
  * be unique amoung slots registered on this system.
  * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @info: pointer to the &struct hotplug_slot_info for the inital values for
+ * @info: pointer to the &struct hotplug_slot_info for the initial values for
  * this slot.
  * @release: called during pci_hp_deregister to free memory allocated in a
  * hotplug_slot structure.

+ 18 - 5
drivers/pci/hotplug/pciehp_core.c

@@ -90,6 +90,22 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
   	.get_cur_bus_speed =	get_cur_bus_speed,
 };

+/**
+ * release_slot - free up the memory used by a slot
+ * @hotplug_slot: slot to free
+ */
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+	struct slot *slot = hotplug_slot->private;
+
+	dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
+
+	kfree(slot->hotplug_slot->info);
+	kfree(slot->hotplug_slot->name);
+	kfree(slot->hotplug_slot);
+	kfree(slot);
+}
+
 static int init_slots(struct controller *ctrl)
 {
 	struct slot *new_slot;
@@ -139,7 +155,8 @@ static int init_slots(struct controller *ctrl)

 		/* register this slot with the hotplug pci core */
 		new_slot->hotplug_slot->private = new_slot;
-		make_slot_name (new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
+		new_slot->hotplug_slot->release = &release_slot;
+		make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
 		new_slot->hotplug_slot->ops = &pciehp_hotplug_slot_ops;

 		new_slot->hpc_ops->get_power_status(new_slot, &(new_slot->hotplug_slot->info->power_status));
@@ -188,10 +205,6 @@ static int cleanup_slots (struct controller * ctrl)
 	while (old_slot) {
 		next_slot = old_slot->next;
 		pci_hp_deregister (old_slot->hotplug_slot);
-		kfree(old_slot->hotplug_slot->info);
-		kfree(old_slot->hotplug_slot->name);
-		kfree(old_slot->hotplug_slot);
-		kfree(old_slot);
 		old_slot = next_slot;
 	}


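Moving the four kfree() calls out of cleanup_slots() and into a release() callback lets the hotplug core decide when the slot memory dies: it is freed after the last reference is dropped by pci_hp_deregister(), not underneath it. A small refcounting sketch of the pattern; put_ref() stands in for the kref machinery the core really uses:

	#include <stdlib.h>

	struct hotplug_slot {
		void *private;				/* driver's per-slot data */
		int refcount;
		void (*release)(struct hotplug_slot *);
	};

	/* The object is freed only when the final reference goes away,
	 * via the owner-supplied callback. */
	static void put_ref(struct hotplug_slot *hs)
	{
		if (--hs->refcount == 0 && hs->release)
			hs->release(hs);
	}

	static void release_slot(struct hotplug_slot *hs)
	{
		free(hs->private);
		free(hs);
	}

A driver wires this up once at init time, exactly as the hunk does with new_slot->hotplug_slot->release = &release_slot.
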
+ 1 - 1
drivers/pci/hotplug/pcihp_skeleton.c

@@ -297,7 +297,7 @@ static int __init init_slots(void)
 		hotplug_slot->ops = &skel_hotplug_slot_ops;
 		
 		/*
-		 * Initilize the slot info structure with some known
+		 * Initialize the slot info structure with some known
 		 * good values.
 		 */
 		info->power_status = get_power_status(slot);

+ 3 - 3
drivers/pci/msi.c

@@ -522,7 +522,7 @@ void pci_scan_msi_device(struct pci_dev *dev)
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
  *
- * Setup the MSI capability structure of device funtion with a single
+ * Setup the MSI capability structure of device function with a single
  * MSI vector, regardless of device function is capable of handling
  * multiple messages. A return of zero indicates the successful setup
  * of an entry zero with the new MSI vector or non-zero for otherwise.
@@ -599,7 +599,7 @@ static int msi_capability_init(struct pci_dev *dev)
  * msix_capability_init - configure device's MSI-X capability
  * @dev: pointer to the pci_dev data structure of MSI-X device function
  *
- * Setup the MSI-X capability structure of device funtion with a
+ * Setup the MSI-X capability structure of device function with a
  * single MSI-X vector. A return of zero indicates the successful setup of
  * requested MSI-X entries with allocated vectors or non-zero for otherwise.
  **/
@@ -1074,7 +1074,7 @@ void pci_disable_msix(struct pci_dev* dev)
  * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
  * @dev: pointer to the pci_dev data structure of MSI(X) device function
  *
- * Being called during hotplug remove, from which the device funciton
+ * Being called during hotplug remove, from which the device function
  * is hot-removed. All previous assigned MSI/MSI-X vectors, if
  * allocated for this device function, are reclaimed to unused state,
  * which may be used later on.

+ 1 - 1
drivers/pci/pci-acpi.c

@@ -19,7 +19,7 @@

 static u32 ctrlset_buf[3] = {0, 0, 0};
 static u32 global_ctrlsets = 0;
-u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
+static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};

 static acpi_status  
 acpi_query_osc (

+ 10 - 1
drivers/pci/pci-driver.c

@@ -318,6 +318,14 @@ static int pci_device_resume(struct device * dev)
 	return 0;
 }

+static void pci_device_shutdown(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct pci_driver *drv = pci_dev->driver;
+
+	if (drv && drv->shutdown)
+		drv->shutdown(pci_dev);
+}

 #define kobj_to_pci_driver(obj) container_of(obj, struct device_driver, kobj)
 #define attr_to_driver_attribute(obj) container_of(obj, struct driver_attribute, attr)
@@ -373,7 +381,7 @@ pci_populate_driver_dir(struct pci_driver *drv)
  * 
  * Adds the driver structure to the list of registered drivers.
  * Returns a negative value on error, otherwise 0. 
- * If no error occured, the driver remains registered even if 
+ * If no error occurred, the driver remains registered even if 
  * no device was claimed during registration.
  */
 int pci_register_driver(struct pci_driver *drv)
@@ -385,6 +393,7 @@ int pci_register_driver(struct pci_driver *drv)
 	drv->driver.bus = &pci_bus_type;
 	drv->driver.probe = pci_device_probe;
 	drv->driver.remove = pci_device_remove;
+	drv->driver.shutdown = pci_device_shutdown,
 	drv->driver.owner = drv->owner;
 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
 	pci_init_dynids(&drv->dynids);

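pci_device_shutdown() is a thin bridge: the driver core invokes bus-level shutdown hooks with a generic struct device, and the PCI layer recovers its own wrapper and forwards to the bound driver's optional shutdown() method. A reduced sketch of the container_of pattern involved; the _sk types are stand-ins, not the kernel's:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device { int id; };

	struct pci_dev_sk;

	struct pci_driver_sk {
		void (*shutdown)(struct pci_dev_sk *dev);
	};

	struct pci_dev_sk {
		struct device dev;		/* embedded generic device */
		struct pci_driver_sk *driver;	/* NULL if no driver bound */
	};

	/* Registered once as the bus's hook; the per-driver method stays optional. */
	static void pci_device_shutdown_sk(struct device *dev)
	{
		struct pci_dev_sk *pdev = container_of(dev, struct pci_dev_sk, dev);

		if (pdev->driver && pdev->driver->shutdown)
			pdev->driver->shutdown(pdev);
	}
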
+ 58 - 24
drivers/pci/pci-sysfs.c

@@ -91,6 +91,7 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
 	unsigned int size = 64;
 	loff_t init_off = off;
+	u8 *data = (u8*) buf;

 	/* Several chips lock up trying to read undefined config space */
 	if (capable(CAP_SYS_ADMIN)) {
@@ -108,30 +109,47 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		size = count;
 	}

-	while (off & 3) {
-		unsigned char val;
+	if ((off & 1) && size) {
+		u8 val;
 		pci_read_config_byte(dev, off, &val);
-		buf[off - init_off] = val;
+		data[off - init_off] = val;
 		off++;
-		if (--size == 0)
-			break;
+		size--;
+	}
+
+	if ((off & 3) && size > 2) {
+		u16 val;
+		pci_read_config_word(dev, off, &val);
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		off += 2;
+		size -= 2;
 	}

 	while (size > 3) {
-		unsigned int val;
+		u32 val;
 		pci_read_config_dword(dev, off, &val);
-		buf[off - init_off] = val & 0xff;
-		buf[off - init_off + 1] = (val >> 8) & 0xff;
-		buf[off - init_off + 2] = (val >> 16) & 0xff;
-		buf[off - init_off + 3] = (val >> 24) & 0xff;
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		data[off - init_off + 2] = (val >> 16) & 0xff;
+		data[off - init_off + 3] = (val >> 24) & 0xff;
 		off += 4;
 		size -= 4;
 	}

-	while (size > 0) {
-		unsigned char val;
+	if (size >= 2) {
+		u16 val;
+		pci_read_config_word(dev, off, &val);
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		off += 2;
+		size -= 2;
+	}
+
+	if (size > 0) {
+		u8 val;
 		pci_read_config_byte(dev, off, &val);
-		buf[off - init_off] = val;
+		data[off - init_off] = val;
 		off++;
 		--size;
 	}
@@ -145,6 +163,7 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
 	unsigned int size = count;
 	loff_t init_off = off;
+	u8 *data = (u8*) buf;

 	if (off > dev->cfg_size)
 		return 0;
@@ -152,26 +171,41 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		size = dev->cfg_size - off;
 		count = size;
 	}
-
-	while (off & 3) {
-		pci_write_config_byte(dev, off, buf[off - init_off]);
+	
+	if ((off & 1) && size) {
+		pci_write_config_byte(dev, off, data[off - init_off]);
 		off++;
-		if (--size == 0)
-			break;
+		size--;
 	}
+	
+	if ((off & 3) && size > 2) {
+		u16 val = data[off - init_off];
+		val |= (u16) data[off - init_off + 1] << 8;
+                pci_write_config_word(dev, off, val);
+                off += 2;
+                size -= 2;
+        }

 	while (size > 3) {
-		unsigned int val = buf[off - init_off];
-		val |= (unsigned int) buf[off - init_off + 1] << 8;
-		val |= (unsigned int) buf[off - init_off + 2] << 16;
-		val |= (unsigned int) buf[off - init_off + 3] << 24;
+		u32 val = data[off - init_off];
+		val |= (u32) data[off - init_off + 1] << 8;
+		val |= (u32) data[off - init_off + 2] << 16;
+		val |= (u32) data[off - init_off + 3] << 24;
 		pci_write_config_dword(dev, off, val);
 		off += 4;
 		size -= 4;
 	}
+	
+	if (size >= 2) {
+		u16 val = data[off - init_off];
+		val |= (u16) data[off - init_off + 1] << 8;
+		pci_write_config_word(dev, off, val);
+		off += 2;
+		size -= 2;
+	}

-	while (size > 0) {
-		pci_write_config_byte(dev, off, buf[off - init_off]);
+	if (size) {
+		pci_write_config_byte(dev, off, data[off - init_off]);
 		off++;
 		--size;
 	}

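The rewritten read/write paths replace the old byte-at-a-time head and tail loops with a fixed alignment ladder: at most one leading byte to reach word alignment, at most one leading word to reach dword alignment, dwords through the middle, then a trailing word and byte, so every config transaction is issued at its natural width and alignment. The same ladder over an ordinary byte buffer, as a standalone sketch:

	#include <stdint.h>
	#include <string.h>

	/* Copy `size` bytes starting at offset `off` out of `cfg`, using the
	 * widest naturally aligned chunks, mirroring the hunk above. */
	static void copy_cfg(const uint8_t *cfg, uint8_t *out,
			     unsigned int off, unsigned int size)
	{
		unsigned int init_off = off;

		if ((off & 1) && size) {		/* leading byte -> word aligned */
			out[off - init_off] = cfg[off];
			off++; size--;
		}
		if ((off & 3) && size > 2) {		/* leading word -> dword aligned */
			memcpy(&out[off - init_off], &cfg[off], 2);
			off += 2; size -= 2;
		}
		while (size > 3) {			/* aligned dwords */
			memcpy(&out[off - init_off], &cfg[off], 4);
			off += 4; size -= 4;
		}
		if (size >= 2) {			/* trailing word */
			memcpy(&out[off - init_off], &cfg[off], 2);
			off += 2; size -= 2;
		}
		if (size)				/* trailing byte */
			out[off - init_off] = cfg[off];
	}
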
+ 4 - 16
drivers/pci/pci.c

@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
+#include "pci.h"


 /**
@@ -398,10 +399,10 @@ pci_enable_device(struct pci_dev *dev)
 {
 	int err;

-	dev->is_enabled = 1;
 	if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
 		return err;
 	pci_fixup_device(pci_fixup_enable, dev);
+	dev->is_enabled = 1;
 	return 0;
 }

@@ -427,16 +428,15 @@ pci_disable_device(struct pci_dev *dev)
 {
 	u16 pci_command;
 	
-	dev->is_enabled = 0;
-	dev->is_busmaster = 0;
-
 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
 	if (pci_command & PCI_COMMAND_MASTER) {
 		pci_command &= ~PCI_COMMAND_MASTER;
 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
 	}
+	dev->is_busmaster = 0;

 	pcibios_disable_device(dev);
+	dev->is_enabled = 0;
 }

 /**
@@ -748,17 +748,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 	return 0;
 }
     
-int
-pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dac_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dma_mask = mask;
-
-	return 0;
-}
-
 int
 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 {
@@ -821,7 +810,6 @@ EXPORT_SYMBOL(pci_set_master);
 EXPORT_SYMBOL(pci_set_mwi);
 EXPORT_SYMBOL(pci_clear_mwi);
 EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_dac_set_dma_mask);
 EXPORT_SYMBOL(pci_set_consistent_dma_mask);
 EXPORT_SYMBOL(pci_assign_resource);
 EXPORT_SYMBOL(pci_find_parent_resource);

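The pci.c reordering enforces one invariant: a status flag changes only once the state it describes is real. is_enabled is now set only after pci_enable_device_bars() and the enable-time fixups succeed, and cleared only after pcibios_disable_device() has run; is_busmaster is cleared only after bus mastering is actually switched off. Reduced to its essence, as a sketch with hypothetical names:

	#include <stdbool.h>

	struct dev_sk { bool is_enabled; };

	static int hw_enable(struct dev_sk *d) { (void)d; return 0; /* 0 == success */ }
	static void hw_disable(struct dev_sk *d) { (void)d; }

	static int enable_device(struct dev_sk *d)
	{
		int err = hw_enable(d);
		if (err)
			return err;	/* flag untouched: device never came up */
		d->is_enabled = true;	/* set only after the hardware agrees */
		return 0;
	}

	static void disable_device(struct dev_sk *d)
	{
		hw_disable(d);
		d->is_enabled = false;	/* cleared only after teardown completes */
	}
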
+ 1 - 0
drivers/pci/probe.c

@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/cpumask.h>
+#include "pci.h"

 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
 #define CARDBUS_RESERVE_BUSNR	3

+ 1 - 0
drivers/pci/proc.c

@@ -15,6 +15,7 @@

 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
+#include "pci.h"

 static int proc_initialized;	/* = 0 */


+ 2 - 0
drivers/pci/quirks.c

@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include "pci.h"

 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
@@ -328,6 +329,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_12,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_0,		quirk_ich4_lpc_acpi );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_12,	quirk_ich4_lpc_acpi );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801EB_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_ESB_1,		quirk_ich4_lpc_acpi );

 /*
  * VIA ACPI: One IO region pointed to by longword at

+ 6 - 6
drivers/scsi/Kconfig

@@ -260,7 +260,7 @@ config SCSI_3W_9XXX

 config SCSI_7000FASST
 	tristate "7000FASST SCSI support"
-	depends on ISA && SCSI
+	depends on ISA && SCSI && ISA_DMA_API
 	help
 	  This driver supports the Western Digital 7000 SCSI host adapter
 	  family.  Some information is in the source:
@@ -295,7 +295,7 @@ config SCSI_AHA152X

 config SCSI_AHA1542
 	tristate "Adaptec AHA1542 support"
-	depends on ISA && SCSI
+	depends on ISA && SCSI && ISA_DMA_API
 	---help---
 	  This is support for a SCSI host adapter.  It is explained in section
 	  3.4 of the SCSI-HOWTO, available from
@@ -515,7 +515,7 @@ config SCSI_SATA_VITESSE

 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
-	depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64)
+	depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
 	---help---
 	  This is support for BusLogic MultiMaster and FlashPoint SCSI Host
 	  Adapters. Consult the SCSI-HOWTO, available from
@@ -571,7 +571,7 @@ config SCSI_DTC3280

 config SCSI_EATA
 	tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
-	depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+	depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
 	---help---
 	  This driver supports all EATA/DMA-compliant SCSI host adapters.  DPT
 	  ISA and all EISA I/O addresses are probed looking for the "EATA"
@@ -665,7 +665,7 @@ config SCSI_FD_MCS

 config SCSI_GDTH
 	tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
-	depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+	depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
 	---help---
 	  Formerly called GDT SCSI Disk Array Controller Support.

@@ -1416,7 +1416,7 @@ config SCSI_T128

 config SCSI_U14_34F
 	tristate "UltraStor 14F/34F support"
-	depends on ISA && SCSI
+	depends on ISA && SCSI && ISA_DMA_API
 	---help---
 	  This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
 	  The source at <file:drivers/scsi/u14-34f.c> contains some

+ 2 - 2
drivers/usb/core/message.c

@@ -431,7 +431,7 @@ nomem:
  * (2) error, where io->status is a negative errno value.  The number
  *     of io->bytes transferred before the error is usually less
  *     than requested, and can be nonzero.
- * (3) cancelation, a type of error with status -ECONNRESET that
+ * (3) cancellation, a type of error with status -ECONNRESET that
  *     is initiated by usb_sg_cancel().
  *
  * When this function returns, all memory allocated through usb_sg_init() or
@@ -1282,7 +1282,7 @@ static void release_interface(struct device *dev)
  * bus rwsem; usb device driver probe() methods cannot use this routine.
  *
  * Returns zero on success, or else the status code returned by the
- * underlying call that failed.  On succesful completion, each interface
+ * underlying call that failed.  On successful completion, each interface
  * in the original device configuration has been destroyed, and each one
  * in the new configuration has been probed by all relevant usb device
  * drivers currently known to the kernel.

+ 3 - 3
drivers/usb/core/urb.c

@@ -121,7 +121,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * describing that request to the USB subsystem.  Request completion will
  * be indicated later, asynchronously, by calling the completion handler.
  * The three types of completion are success, error, and unlink
- * (a software-induced fault, also called "request cancelation").  
+ * (a software-induced fault, also called "request cancellation").  
  *
  * URBs may be submitted in interrupt context.
  *
@@ -170,7 +170,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * As of Linux 2.6, all USB endpoint transfer queues support depths greater
  * than one.  This was previously a HCD-specific behavior, except for ISO
  * transfers.  Non-isochronous endpoint queues are inactive during cleanup
- * after faults (transfer errors or cancelation).
+ * after faults (transfer errors or cancellation).
  *
  * Reserved Bandwidth Transfers:
  *
@@ -395,7 +395,7 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
  *
  * This routine cancels an in-progress request.  URBs complete only
  * once per submission, and may be canceled only once per submission.
- * Successful cancelation means the requests's completion handler will
+ * Successful cancellation means the requests's completion handler will
  * be called with a status code indicating that the request has been
  * canceled (rather than any other code) and will quickly be removed
  * from host controller data structures.

+ 1 - 1
drivers/usb/gadget/ether.c

@@ -569,7 +569,7 @@ static const struct usb_cdc_ether_desc ether_desc = {

 /* include the status endpoint if we can, even where it's optional.
  * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
- * packet, to simplify cancelation; and a big transfer interval, to
+ * packet, to simplify cancellation; and a big transfer interval, to
  * waste less bandwidth.
  *
  * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even

+ 1 - 1
drivers/usb/gadget/inode.c

@@ -275,7 +275,7 @@ static const char *CHIP;
  *
  * After opening, configure non-control endpoints.  Then use normal
  * stream read() and write() requests; and maybe ioctl() to get more
- * precise FIFO status when recovering from cancelation.
+ * precise FIFO status when recovering from cancellation.
  */

 static void epio_complete (struct usb_ep *ep, struct usb_request *req)

+ 1 - 1
drivers/usb/gadget/lh7a40x_udc.c

@@ -705,7 +705,7 @@ void nuke(struct lh7a40x_ep *ep, int status)
 		done(ep, req, status);
 	}

-	/* Disable IRQ if EP is enabled (has decriptor) */
+	/* Disable IRQ if EP is enabled (has descriptor) */
 	if (ep->desc)
 		pio_irq_disable(ep_index(ep));
 }

+ 1 - 1
drivers/usb/gadget/serial.c

@@ -240,7 +240,7 @@ struct gs_dev {
 	struct usb_ep		*dev_notify_ep;	/* address of notify endpoint */
 	struct usb_ep		*dev_in_ep;	/* address of in endpoint */
 	struct usb_ep		*dev_out_ep;	/* address of out endpoint */
-	struct usb_endpoint_descriptor		/* desciptor of notify ep */
+	struct usb_endpoint_descriptor		/* descriptor of notify ep */
 				*dev_notify_ep_desc;
 	struct usb_endpoint_descriptor		/* descriptor of in endpoint */
 				*dev_in_ep_desc;

Some files were not shown because too many files changed in this diff