Browse source

Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (237 commits)
  Staging: android: binder: fix build errors
  Staging: android: add lowmemorykiller driver
  Staging: android: remove dummy android.c driver
  Staging: android: timed_gpio: Rename android_timed_gpio to timed_gpio
  Staging: android: add timed_gpio driver
  Staging: android: add ram_console driver
  Staging: android: add logging driver
  staging: android: binder: Fix use of euid
  Staging: android: binder: Fix gcc warnings about improper format specifiers for size_t in printk
  Staging: android: add binder driver
  Staging: add android framework
  Staging: epl: fix netdev->priv b0rkage
  Staging: epl: hr timers all run in hard irq context now
  Staging: epl: run Lindent on *.c files
  Staging: epl: run Lindent on *.h files
  Staging: epl: run Lindent on all user/*.h files
  Staging: epl: run Lindent on all kernel/*.h files
  Staging: add epl stack
  Staging: frontier: fix compiler warnings
  Staging: frontier: remove unused alphatrack_sysfs.c file
  ...
Linus Torvalds 16 years ago
parent
commit
ce519e2327
100 changed files with 31136 additions and 587 deletions
  1. arch/arm/Kconfig (+2 -0)
  2. arch/cris/Kconfig (+2 -0)
  3. arch/h8300/Kconfig (+2 -0)
  4. drivers/staging/Kconfig (+32 -0)
  5. drivers/staging/Makefile (+16 -0)
  6. drivers/staging/agnx/Kconfig (+5 -0)
  7. drivers/staging/agnx/Makefile (+8 -0)
  8. drivers/staging/agnx/TODO (+22 -0)
  9. drivers/staging/agnx/agnx.h (+154 -0)
  10. drivers/staging/agnx/debug.h (+418 -0)
  11. drivers/staging/agnx/pci.c (+644 -0)
  12. drivers/staging/agnx/phy.c (+960 -0)
  13. drivers/staging/agnx/phy.h (+409 -0)
  14. drivers/staging/agnx/rf.c (+894 -0)
  15. drivers/staging/agnx/sta.c (+219 -0)
  16. drivers/staging/agnx/sta.h (+222 -0)
  17. drivers/staging/agnx/table.c (+168 -0)
  18. drivers/staging/agnx/table.h (+10 -0)
  19. drivers/staging/agnx/xmit.c (+819 -0)
  20. drivers/staging/agnx/xmit.h (+250 -0)
  21. drivers/staging/altpciechdma/Kconfig (+10 -0)
  22. drivers/staging/altpciechdma/Makefile (+2 -0)
  23. drivers/staging/altpciechdma/TODO (+15 -0)
  24. drivers/staging/altpciechdma/altpciechdma.c (+1184 -0)
  25. drivers/staging/android/Kconfig (+86 -0)
  26. drivers/staging/android/Makefile (+5 -0)
  27. drivers/staging/android/TODO (+10 -0)
  28. drivers/staging/android/binder.c (+3503 -0)
  29. drivers/staging/android/binder.h (+330 -0)
  30. drivers/staging/android/logger.c (+607 -0)
  31. drivers/staging/android/logger.h (+48 -0)
  32. drivers/staging/android/lowmemorykiller.c (+119 -0)
  33. drivers/staging/android/ram_console.c (+395 -0)
  34. drivers/staging/android/timed_gpio.c (+177 -0)
  35. drivers/staging/android/timed_gpio.h (+31 -0)
  36. drivers/staging/asus_oled/Kconfig (+6 -0)
  37. drivers/staging/asus_oled/Makefile (+1 -0)
  38. drivers/staging/asus_oled/README (+156 -0)
  39. drivers/staging/asus_oled/TODO (+10 -0)
  40. drivers/staging/asus_oled/asus_oled.c (+745 -0)
  41. drivers/staging/asus_oled/linux.txt (+33 -0)
  42. drivers/staging/asus_oled/linux_f.txt (+18 -0)
  43. drivers/staging/asus_oled/linux_fr.txt (+33 -0)
  44. drivers/staging/asus_oled/tux.txt (+33 -0)
  45. drivers/staging/asus_oled/tux_r.txt (+33 -0)
  46. drivers/staging/asus_oled/tux_r2.txt (+33 -0)
  47. drivers/staging/asus_oled/zig.txt (+33 -0)
  48. drivers/staging/at76_usb/Kconfig (+1 -1)
  49. drivers/staging/at76_usb/at76_usb.c (+248 -417)
  50. drivers/staging/at76_usb/at76_usb.h (+58 -169)
  51. drivers/staging/benet/Kconfig (+7 -0)
  52. drivers/staging/benet/MAINTAINERS (+6 -0)
  53. drivers/staging/benet/Makefile (+14 -0)
  54. drivers/staging/benet/TODO (+6 -0)
  55. drivers/staging/benet/asyncmesg.h (+82 -0)
  56. drivers/staging/benet/be_cm.h (+134 -0)
  57. drivers/staging/benet/be_common.h (+53 -0)
  58. drivers/staging/benet/be_ethtool.c (+348 -0)
  59. drivers/staging/benet/be_init.c (+1382 -0)
  60. drivers/staging/benet/be_int.c (+863 -0)
  61. drivers/staging/benet/be_netif.c (+705 -0)
  62. drivers/staging/benet/benet.h (+429 -0)
  63. drivers/staging/benet/bestatus.h (+103 -0)
  64. drivers/staging/benet/cev.h (+243 -0)
  65. drivers/staging/benet/cq.c (+211 -0)
  66. drivers/staging/benet/descriptors.h (+71 -0)
  67. drivers/staging/benet/doorbells.h (+179 -0)
  68. drivers/staging/benet/ep.h (+66 -0)
  69. drivers/staging/benet/eq.c (+299 -0)
  70. drivers/staging/benet/eth.c (+1273 -0)
  71. drivers/staging/benet/etx_context.h (+55 -0)
  72. drivers/staging/benet/funcobj.c (+565 -0)
  73. drivers/staging/benet/fwcmd_common.h (+222 -0)
  74. drivers/staging/benet/fwcmd_common_bmap.h (+717 -0)
  75. drivers/staging/benet/fwcmd_eth_bmap.h (+280 -0)
  76. drivers/staging/benet/fwcmd_hdr_bmap.h (+54 -0)
  77. drivers/staging/benet/fwcmd_mcc.h (+94 -0)
  78. drivers/staging/benet/fwcmd_opcodes.h (+244 -0)
  79. drivers/staging/benet/fwcmd_types_bmap.h (+29 -0)
  80. drivers/staging/benet/host_struct.h (+182 -0)
  81. drivers/staging/benet/hwlib.h (+830 -0)
  82. drivers/staging/benet/mpu.c (+1364 -0)
  83. drivers/staging/benet/mpu.h (+74 -0)
  84. drivers/staging/benet/mpu_context.h (+46 -0)
  85. drivers/staging/benet/pcicfg.h (+825 -0)
  86. drivers/staging/benet/post_codes.h (+111 -0)
  87. drivers/staging/benet/regmap.h (+68 -0)
  88. drivers/staging/comedi/Kconfig (+27 -0)
  89. drivers/staging/comedi/Makefile (+17 -0)
  90. drivers/staging/comedi/TODO (+14 -0)
  91. drivers/staging/comedi/comedi.h (+916 -0)
  92. drivers/staging/comedi/comedi_compat32.c (+597 -0)
  93. drivers/staging/comedi/comedi_compat32.h (+58 -0)
  94. drivers/staging/comedi/comedi_fops.c (+2244 -0)
  95. drivers/staging/comedi/comedi_fops.h (+8 -0)
  96. drivers/staging/comedi/comedi_ksyms.c (+77 -0)
  97. drivers/staging/comedi/comedi_rt.h (+150 -0)
  98. drivers/staging/comedi/comedidev.h (+537 -0)
  99. drivers/staging/comedi/comedilib.h (+192 -0)
  100. drivers/staging/comedi/drivers.c (+846 -0)

+ 2 - 0
arch/arm/Kconfig

@@ -1325,6 +1325,8 @@ source "drivers/regulator/Kconfig"
 
 source "drivers/uio/Kconfig"
 
+source "drivers/staging/Kconfig"
+
 endmenu
 
 source "fs/Kconfig"

+ 2 - 0
arch/cris/Kconfig

@@ -681,6 +681,8 @@ source "drivers/usb/Kconfig"
 
 source "drivers/uwb/Kconfig"
 
+source "drivers/staging/Kconfig"
+
 source "arch/cris/Kconfig.debug"
 
 source "security/Kconfig"

+ 2 - 0
arch/h8300/Kconfig

@@ -220,6 +220,8 @@ source "drivers/uwb/Kconfig"
 
 endmenu
 
+source "drivers/staging/Kconfig"
+
 source "fs/Kconfig"
 
 source "arch/h8300/Kconfig.debug"

+ 32 - 0
drivers/staging/Kconfig

@@ -49,6 +49,8 @@ source "drivers/staging/sxg/Kconfig"
 
 source "drivers/staging/me4000/Kconfig"
 
+source "drivers/staging/meilhaus/Kconfig"
+
 source "drivers/staging/go7007/Kconfig"
 
 source "drivers/staging/usbip/Kconfig"
@@ -63,5 +65,35 @@ source "drivers/staging/at76_usb/Kconfig"
 
 source "drivers/staging/poch/Kconfig"
 
+source "drivers/staging/agnx/Kconfig"
+
+source "drivers/staging/otus/Kconfig"
+
+source "drivers/staging/rt2860/Kconfig"
+
+source "drivers/staging/rt2870/Kconfig"
+
+source "drivers/staging/benet/Kconfig"
+
+source "drivers/staging/comedi/Kconfig"
+
+source "drivers/staging/asus_oled/Kconfig"
+
+source "drivers/staging/panel/Kconfig"
+
+source "drivers/staging/altpciechdma/Kconfig"
+
+source "drivers/staging/rtl8187se/Kconfig"
+
+source "drivers/staging/rspiusb/Kconfig"
+
+source "drivers/staging/mimio/Kconfig"
+
+source "drivers/staging/frontier/Kconfig"
+
+source "drivers/staging/epl/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING

+ 16 - 0
drivers/staging/Makefile

@@ -7,6 +7,7 @@ obj-$(CONFIG_ET131X)		+= et131x/
 obj-$(CONFIG_SLICOSS)		+= slicoss/
 obj-$(CONFIG_SXG)		+= sxg/
 obj-$(CONFIG_ME4000)		+= me4000/
+obj-$(CONFIG_MEILHAUS)		+= meilhaus/
 obj-$(CONFIG_VIDEO_GO7007)	+= go7007/
 obj-$(CONFIG_USB_IP_COMMON)	+= usbip/
 obj-$(CONFIG_W35UND)		+= winbond/
@@ -14,3 +15,18 @@ obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_USB_ATMEL)		+= at76_usb/
 obj-$(CONFIG_POCH)		+= poch/
+obj-$(CONFIG_AGNX)		+= agnx/
+obj-$(CONFIG_OTUS)		+= otus/
+obj-$(CONFIG_RT2860)		+= rt2860/
+obj-$(CONFIG_RT2870)		+= rt2870/
+obj-$(CONFIG_BENET)		+= benet/
+obj-$(CONFIG_COMEDI)		+= comedi/
+obj-$(CONFIG_ASUS_OLED)		+= asus_oled/
+obj-$(CONFIG_PANEL)		+= panel/
+obj-$(CONFIG_ALTERA_PCIE_CHDMA)	+= altpciechdma/
+obj-$(CONFIG_RTL8187SE)		+= rtl8187se/
+obj-$(CONFIG_USB_RSPI)		+= rspiusb/
+obj-$(CONFIG_INPUT_MIMIO)	+= mimio/
+obj-$(CONFIG_TRANZPORT)		+= frontier/
+obj-$(CONFIG_EPL)		+= epl/
+obj-$(CONFIG_ANDROID)		+= android/

+ 5 - 0
drivers/staging/agnx/Kconfig

@@ -0,0 +1,5 @@
+config AGNX
+	tristate "Wireless Airgo AGNX support"
+	depends on WLAN_80211 && MAC80211
+	---help---
+	  This is an experimental driver for the Airgo AGNX00 wireless chip.

+ 8 - 0
drivers/staging/agnx/Makefile

@@ -0,0 +1,8 @@
+obj-$(CONFIG_AGNX)	+= agnx.o
+
+agnx-objs :=	rf.o	\
+		pci.o	\
+		xmit.o	\
+		table.o	\
+		sta.o	\
+		phy.o

+ 22 - 0
drivers/staging/agnx/TODO

@@ -0,0 +1,22 @@
+2008 7/18
+
+The RX can't receive OFDM packets correctly; it probably needs an RX
+calibration.
+
+
+before 2008 3/1
+
+1: The RX gets too many "CRC failed" packets, which makes the card very unstable.
+2: After running for a while, the card gets endless "RX Frame" and "Error"
+interrupts; the root cause is unknown so far, try to fix it.
+3: Use the two TX queues txd and txm, not only txm.
+4: Set the hdr correctly.
+5: Try to do the recalibration correctly.
+6: Support G mode in the future.
+7: Fix the MAC address that can't be read and set correctly on big-endian machines.
+8: Fix including and excluding the FCS in promiscuous and managed modes.
+9: Use sta_notify to notice station changes.
+10: The card presumably supports HW_MULTICAST_FILTER.
+11: Turn on frame reception at the end of start.
+12: Should the TX path be made atomic?
+13: Use mac80211 functions to control the TX & RX LEDs.

+ 154 - 0
drivers/staging/agnx/agnx.h

@@ -0,0 +1,154 @@
+#ifndef AGNX_H_
+#define AGNX_H_
+
+#include "xmit.h"
+
+#define PFX				KBUILD_MODNAME ": "
+
+static inline u32 agnx_read32(void __iomem *mem_region, u32 offset)
+{
+	return ioread32(mem_region + offset);
+}
+
+static inline void agnx_write32(void __iomem *mem_region, u32 offset, u32 val)
+{
+	iowrite32(val, mem_region + offset);
+}
+
+/* static const struct ieee80211_rate agnx_rates_80211b[] = { */
+/* 	{ .rate = 10, */
+/* 	  .val = 0xa, */
+/* 	  .flags = IEEE80211_RATE_CCK }, */
+/* 	{ .rate = 20, */
+/* 	  .val = 0x14, */
+/* 	  .hw_value = -0x14, */
+/* 	  .flags = IEEE80211_RATE_CCK_2 }, */
+/* 	{ .rate = 55, */
+/* 	  .val = 0x37, */
+/* 	  .val2 = -0x37, */
+/* 	  .flags = IEEE80211_RATE_CCK_2 }, */
+/* 	{ .rate = 110, */
+/* 	  .val = 0x6e, */
+/* 	  .val2 = -0x6e, */
+/* 	  .flags = IEEE80211_RATE_CCK_2 } */
+/* }; */
+
+
+static const struct ieee80211_rate agnx_rates_80211g[] = {
+/* 	{ .bitrate = 10, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
+/* 	{ .bitrate = 20, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
+/* 	{ .bitrate = 55, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
+/* 	{ .bitrate = 110, .hw_value = 4, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, */
+ 	{ .bitrate = 10, .hw_value = 1, },
+ 	{ .bitrate = 20, .hw_value = 2, },
+ 	{ .bitrate = 55, .hw_value = 3, },
+ 	{ .bitrate = 110, .hw_value = 4,},
+
+	{ .bitrate = 60, .hw_value = 0xB, },
+	{ .bitrate = 90, .hw_value = 0xF, },
+	{ .bitrate = 120, .hw_value = 0xA },
+	{ .bitrate = 180, .hw_value = 0xE, },
+//	{ .bitrate = 240, .hw_value = 0xd, },
+	{ .bitrate = 360, .hw_value = 0xD, },
+	{ .bitrate = 480, .hw_value = 0x8, },
+	{ .bitrate = 540, .hw_value = 0xC, },
+};
+
+static const struct ieee80211_channel agnx_channels[] = {
+	{ .center_freq = 2412, .hw_value = 1, },
+	{ .center_freq = 2417, .hw_value = 2, },
+	{ .center_freq = 2422, .hw_value = 3, },
+	{ .center_freq = 2427, .hw_value = 4, },
+	{ .center_freq = 2432, .hw_value = 5, },
+	{ .center_freq = 2437, .hw_value = 6, },
+	{ .center_freq = 2442, .hw_value = 7, },
+	{ .center_freq = 2447, .hw_value = 8, },
+	{ .center_freq = 2452, .hw_value = 9, },
+	{ .center_freq = 2457, .hw_value = 10, },
+	{ .center_freq = 2462, .hw_value = 11, },
+	{ .center_freq = 2467, .hw_value = 12, },
+	{ .center_freq = 2472, .hw_value = 13, },
+	{ .center_freq = 2484, .hw_value = 14, },
+};
+
+#define NUM_DRIVE_MODES	2
+/* AGNX operating modes */
+enum {
+	AGNX_MODE_80211A,
+	AGNX_MODE_80211A_OOB,
+	AGNX_MODE_80211A_MIMO,
+	AGNX_MODE_80211B_SHORT,
+	AGNX_MODE_80211B_LONG,
+	AGNX_MODE_80211G,
+	AGNX_MODE_80211G_OOB,
+	AGNX_MODE_80211G_MIMO,
+};
+
+enum {
+	AGNX_UNINIT,
+	AGNX_START,
+	AGNX_STOP,
+};
+
+struct agnx_priv {
+	struct pci_dev *pdev;
+	struct ieee80211_hw *hw;
+
+	spinlock_t lock;
+	struct mutex mutex;
+	unsigned int init_status;
+
+	void __iomem *ctl;	/* pointer to base ram address */
+	void __iomem *data;	/* pointer to mem region #2 */
+
+	struct agnx_ring rx;
+	struct agnx_ring txm;
+	struct agnx_ring txd;
+
+	/* Need volatile? */
+	u32 irq_status;
+
+	struct delayed_work periodic_work; /* periodic tasks, e.g. recalibration */
+	struct ieee80211_low_level_stats stats;
+
+//        unsigned int phymode;
+	int mode;
+	int channel;
+	u8 bssid[ETH_ALEN];
+
+	u8 mac_addr[ETH_ALEN];
+	u8 revid;
+
+	struct ieee80211_supported_band band;
+};
+
+
+#define AGNX_CHAINS_MAX	6
+#define AGNX_PERIODIC_DELAY 60000 /* unit: ms */
+#define LOCAL_STAID	0	/* the station entry for the card itself */
+#define BSSID_STAID	1	/* the station entry for the BSSID AP */
+#define	spi_delay()	udelay(40)
+#define eeprom_delay()	udelay(40)
+#define	routing_table_delay()	udelay(50)
+
+/* PDU pool MEM region #2 */
+#define AGNX_PDUPOOL		0x40000	/* PDU pool */
+#define AGNX_PDUPOOL_SIZE	0x8000	/* PDU pool size*/
+#define AGNX_PDU_TX_WQ		0x41000	/* PDU list TX workqueue */
+#define AGNX_PDU_FREE		0x41800	/* Free Pool */
+#define PDU_SIZE		0x80	/* Free Pool node size */
+#define PDU_FREE_CNT		0xd0 /* Free pool node count */
+
+
+/* RF stuffs */
+extern void rf_chips_init(struct agnx_priv *priv);
+extern void spi_rc_write(void __iomem *mem_region, u32 chip_ids, u32 sw);
+extern void calibrate_oscillator(struct agnx_priv *priv);
+extern void do_calibration(struct agnx_priv *priv);
+extern void antenna_calibrate(struct agnx_priv *priv);
+extern void __antenna_calibrate(struct agnx_priv *priv);
+extern void print_offsets(struct agnx_priv *priv);
+extern int agnx_set_channel(struct agnx_priv *priv, unsigned int channel);
+
+
+#endif /* AGNX_H_ */
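
The two accessors at the top of this header are thin wrappers around ioread32()/iowrite32(), and nearly every register access in phy.c below is a read-modify-write through them. A minimal sketch of that idiom, assuming only the helpers above (the agnx_set_bits32 name is illustrative, not part of the driver):

	static inline void agnx_set_bits32(void __iomem *ctl, u32 offset, u32 bits)
	{
		u32 reg = agnx_read32(ctl, offset);	/* read the current value */

		reg |= bits;				/* set the requested bits */
		agnx_write32(ctl, offset, reg);		/* write it back */
	}

	/* e.g. disable_power_saving() in phy.c below reduces to:
	 *	agnx_set_bits32(priv->ctl, AGNX_PM_PMCTL, 0x8);
	 */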

+ 418 - 0
drivers/staging/agnx/debug.h

@@ -0,0 +1,418 @@
+#ifndef AGNX_DEBUG_H_
+#define AGNX_DEBUG_H_
+
+#include "agnx.h"
+#include "phy.h"
+#include "sta.h"
+#include "xmit.h"
+
+#define AGNX_TRACE              printk(KERN_ERR PFX "function:%s line:%d\n", __func__, __LINE__)
+
+#define PRINTK_LE16(prefix, var)	printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", le16_to_cpu(var))
+#define PRINTK_LE32(prefix, var)	printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", le32_to_cpu(var))
+#define PRINTK_U8(prefix, var) 		printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.2x\n", var)
+#define PRINTK_BE16(prefix, var)	printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.4x\n", be16_to_cpu(var))
+#define PRINTK_BE32(prefix, var)	printk(KERN_DEBUG PFX #prefix ": " #var " 0x%.8x\n", be32_to_cpu(var))
+#define PRINTK_BITS(prefix, field)    	printk(KERN_DEBUG PFX #prefix ": " #field ": 0x%x\n", (reg & field) >> field##_SHIFT)
+
+static inline void agnx_bug(char *reason)
+{
+	printk(KERN_ERR PFX "%s\n", reason);
+	BUG();
+}
+
+static inline void agnx_print_desc(struct agnx_desc *desc)
+{
+        u32 reg = be32_to_cpu(desc->frag);
+
+	PRINTK_BITS(DESC, PACKET_LEN);
+
+	if (reg & FIRST_FRAG) {
+		PRINTK_BITS(DESC, FIRST_PACKET_MASK);
+		PRINTK_BITS(DESC, FIRST_RESERV2);
+		PRINTK_BITS(DESC, FIRST_TKIP_ERROR);
+		PRINTK_BITS(DESC, FIRST_TKIP_PACKET);
+		PRINTK_BITS(DESC, FIRST_RESERV1);
+		PRINTK_BITS(DESC, FIRST_FRAG_LEN);
+	} else {
+		PRINTK_BITS(DESC, SUB_RESERV2);
+		PRINTK_BITS(DESC, SUB_TKIP_ERROR);
+		PRINTK_BITS(DESC, SUB_TKIP_PACKET);
+		PRINTK_BITS(DESC, SUB_RESERV1);
+		PRINTK_BITS(DESC, SUB_FRAG_LEN);
+	}
+
+	PRINTK_BITS(DESC, FIRST_FRAG);
+	PRINTK_BITS(DESC, LAST_FRAG);
+	PRINTK_BITS(DESC, OWNER);
+}
+
+
+static inline void dump_ieee80211b_phy_hdr(__be32 _11b0, __be32 _11b1)
+{
+
+}
+
+static inline void agnx_print_hdr(struct agnx_hdr *hdr)
+{
+	u32 reg;
+	int i;
+
+	reg = be32_to_cpu(hdr->reg0);
+	PRINTK_BITS(HDR, RTS);
+	PRINTK_BITS(HDR, MULTICAST);
+	PRINTK_BITS(HDR, ACK);
+	PRINTK_BITS(HDR, TM);
+	PRINTK_BITS(HDR, RELAY);
+	PRINTK_BITS(HDR, REVISED_FCS);
+	PRINTK_BITS(HDR, NEXT_BUFFER_ADDR);
+
+	reg = be32_to_cpu(hdr->reg1);
+	PRINTK_BITS(HDR, MAC_HDR_LEN);
+	PRINTK_BITS(HDR, DURATION_OVERIDE);
+	PRINTK_BITS(HDR, PHY_HDR_OVERIDE);
+	PRINTK_BITS(HDR, CRC_FAIL);
+	PRINTK_BITS(HDR, SEQUENCE_NUMBER);
+	PRINTK_BITS(HDR, BUFF_HEAD_ADDR);
+
+	reg = be32_to_cpu(hdr->reg2);
+	PRINTK_BITS(HDR, PDU_COUNT);
+	PRINTK_BITS(HDR, WEP_KEY);
+	PRINTK_BITS(HDR, USES_WEP_KEY);
+	PRINTK_BITS(HDR, KEEP_ALIVE);
+	PRINTK_BITS(HDR, BUFF_TAIL_ADDR);
+
+	reg = be32_to_cpu(hdr->reg3);
+	PRINTK_BITS(HDR, CTS_11G);
+	PRINTK_BITS(HDR, RTS_11G);
+	PRINTK_BITS(HDR, FRAG_SIZE);
+	PRINTK_BITS(HDR, PAYLOAD_LEN);
+	PRINTK_BITS(HDR, FRAG_NUM);
+
+	reg = be32_to_cpu(hdr->reg4);
+	PRINTK_BITS(HDR, RELAY_STAID);
+	PRINTK_BITS(HDR, STATION_ID);
+	PRINTK_BITS(HDR, WORKQUEUE_ID);
+
+	reg = be32_to_cpu(hdr->reg5);
+	/* printf the route flag */
+	PRINTK_BITS(HDR, ROUTE_HOST);
+	PRINTK_BITS(HDR, ROUTE_CARD_CPU);
+	PRINTK_BITS(HDR, ROUTE_ENCRYPTION);
+	PRINTK_BITS(HDR, ROUTE_TX);
+	PRINTK_BITS(HDR, ROUTE_RX1);
+	PRINTK_BITS(HDR, ROUTE_RX2);
+	PRINTK_BITS(HDR, ROUTE_COMPRESSION);
+
+	PRINTK_BE32(HDR, hdr->_11g0);
+	PRINTK_BE32(HDR, hdr->_11g1);
+	PRINTK_BE32(HDR, hdr->_11b0);
+	PRINTK_BE32(HDR, hdr->_11b1);
+
+	dump_ieee80211b_phy_hdr(hdr->_11b0, hdr->_11b1);
+
+	/* Fixme */
+	for (i = 0; i < ARRAY_SIZE(hdr->mac_hdr); i++) {
+		if (i == 0)
+			printk(KERN_DEBUG PFX "IEEE80211 HDR: ");
+		printk("%.2x ", hdr->mac_hdr[i]);
+		if (i + 1 == ARRAY_SIZE(hdr->mac_hdr))
+			printk("\n");
+	}
+
+	PRINTK_BE16(HDR, hdr->rts_duration);
+	PRINTK_BE16(HDR, hdr->last_duration);
+	PRINTK_BE16(HDR, hdr->sec_last_duration);
+	PRINTK_BE16(HDR, hdr->other_duration);
+	PRINTK_BE16(HDR, hdr->tx_other_duration);
+	PRINTK_BE16(HDR, hdr->last_11g_len);
+	PRINTK_BE16(HDR, hdr->other_11g_len);
+	PRINTK_BE16(HDR, hdr->last_11b_len);
+	PRINTK_BE16(HDR, hdr->other_11b_len);
+
+	/* FIXME */
+	reg = be16_to_cpu(hdr->reg6);
+	PRINTK_BITS(HDR, MBF);
+	PRINTK_BITS(HDR, RSVD4);
+
+	PRINTK_BE16(HDR, hdr->rx_frag_stat);
+
+	PRINTK_BE32(HDR, hdr->time_stamp);
+	PRINTK_BE32(HDR, hdr->phy_stats_hi);
+	PRINTK_BE32(HDR, hdr->phy_stats_lo);
+	PRINTK_BE32(HDR, hdr->mic_key0);
+	PRINTK_BE32(HDR, hdr->mic_key1);
+} /* agnx_print_hdr */
+
+
+static inline void agnx_print_rx_hdr(struct agnx_hdr *hdr)
+{
+	agnx_print_hdr(hdr);
+
+	PRINTK_BE16(HDR, hdr->rx.rx_packet_duration);
+	PRINTK_BE16(HDR, hdr->rx.replay_cnt);
+
+	PRINTK_U8(HDR, hdr->rx_channel);
+}
+
+static inline void agnx_print_tx_hdr(struct agnx_hdr *hdr)
+{
+	agnx_print_hdr(hdr);
+
+	PRINTK_U8(HDR, hdr->tx.long_retry_limit);
+	PRINTK_U8(HDR, hdr->tx.short_retry_limit);
+	PRINTK_U8(HDR, hdr->tx.long_retry_cnt);
+	PRINTK_U8(HDR, hdr->tx.short_retry_cnt);
+
+	PRINTK_U8(HDR, hdr->rx_channel);
+}
+
+static inline void
+agnx_print_sta_power(struct agnx_priv *priv, unsigned int sta_idx)
+{
+	struct agnx_sta_power power;
+	u32 reg;
+
+	get_sta_power(priv, &power, sta_idx);
+
+	reg = le32_to_cpu(power.reg);
+	PRINTK_BITS(STA_POWER, SIGNAL);
+	PRINTK_BITS(STA_POWER, RATE);
+	PRINTK_BITS(STA_POWER, TIFS);
+	PRINTK_BITS(STA_POWER, EDCF);
+	PRINTK_BITS(STA_POWER, CHANNEL_BOND);
+	PRINTK_BITS(STA_POWER, PHY_MODE);
+	PRINTK_BITS(STA_POWER, POWER_LEVEL);
+	PRINTK_BITS(STA_POWER, NUM_TRANSMITTERS);
+}
+
+static inline void
+agnx_print_sta_tx_wq(struct agnx_priv *priv, unsigned int sta_idx, unsigned int wq_idx)
+{
+	struct agnx_sta_tx_wq tx_wq;
+	u32 reg;
+
+	get_sta_tx_wq(priv, &tx_wq, sta_idx, wq_idx);
+
+	reg = le32_to_cpu(tx_wq.reg0);
+	PRINTK_BITS(STA_TX_WQ, TAIL_POINTER);
+	PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_LOW);
+
+	reg = le32_to_cpu(tx_wq.reg3);
+	PRINTK_BITS(STA_TX_WQ, HEAD_POINTER_HIGH);
+	PRINTK_BITS(STA_TX_WQ, ACK_POINTER_LOW);
+
+	reg = le32_to_cpu(tx_wq.reg1);
+	PRINTK_BITS(STA_TX_WQ, ACK_POINTER_HIGH);
+	PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_TAIL_PACK_CNT);
+	PRINTK_BITS(STA_TX_WQ, ACK_TIMOUT_TAIL_PACK_CNT);
+
+	reg = le32_to_cpu(tx_wq.reg2);
+	PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_BYTE_CNT);
+	PRINTK_BITS(STA_TX_WQ, HEAD_TIMOUT_WIN_LIM_FRAG_CNT);
+	PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_ACK_TYPE);
+	PRINTK_BITS(STA_TX_WQ, WORK_QUEUE_VALID);
+}
+
+static inline void agnx_print_sta_traffic(struct agnx_sta_traffic *traffic)
+{
+	u32 reg;
+
+	reg = le32_to_cpu(traffic->reg0);
+	PRINTK_BITS(STA_TRAFFIC, ACK_TIMOUT_CNT);
+	PRINTK_BITS(STA_TRAFFIC, TRAFFIC_ACK_TYPE);
+	PRINTK_BITS(STA_TRAFFIC, NEW_PACKET);
+	PRINTK_BITS(STA_TRAFFIC, TRAFFIC_VALID);
+	PRINTK_BITS(STA_TRAFFIC, RX_HDR_DESC_POINTER);
+
+	reg = le32_to_cpu(traffic->reg1);
+	PRINTK_BITS(STA_TRAFFIC, RX_PACKET_TIMESTAMP);
+	PRINTK_BITS(STA_TRAFFIC, TRAFFIC_RESERVED);
+	PRINTK_BITS(STA_TRAFFIC, SV);
+	PRINTK_BITS(STA_TRAFFIC, RX_SEQUENCE_NUM);
+
+	PRINTK_LE32(STA_TRAFFIC, traffic->tx_replay_cnt_low);
+
+	PRINTK_LE16(STA_TRAFFIC, traffic->tx_replay_cnt_high);
+	PRINTK_LE16(STA_TRAFFIC, traffic->rx_replay_cnt_high);
+
+	PRINTK_LE32(STA_TRAFFIC, traffic->rx_replay_cnt_low);
+}
+
+static inline void agnx_print_sta(struct agnx_priv *priv, unsigned int sta_idx)
+{
+	struct agnx_sta station;
+	struct agnx_sta *sta = &station;
+	u32 reg;
+	unsigned int i;
+
+	get_sta(priv, sta, sta_idx);
+
+	for (i = 0; i < 4; i++)
+		PRINTK_LE32(STA, sta->tx_session_keys[i]);
+	for (i = 0; i < 4; i++)
+		PRINTK_LE32(STA, sta->rx_session_keys[i]);
+
+	reg = le32_to_cpu(sta->reg);
+	PRINTK_BITS(STA, ID_1);
+	PRINTK_BITS(STA, ID_0);
+	PRINTK_BITS(STA, ENABLE_CONCATENATION);
+	PRINTK_BITS(STA, ENABLE_DECOMPRESSION);
+	PRINTK_BITS(STA, STA_RESERVED);
+	PRINTK_BITS(STA, EAP);
+	PRINTK_BITS(STA, ED_NULL);
+	PRINTK_BITS(STA, ENCRYPTION_POLICY);
+	PRINTK_BITS(STA, DEFINED_KEY_ID);
+	PRINTK_BITS(STA, FIXED_KEY);
+	PRINTK_BITS(STA, KEY_VALID);
+	PRINTK_BITS(STA, STATION_VALID);
+
+	PRINTK_LE32(STA, sta->tx_aes_blks_unicast);
+	PRINTK_LE32(STA, sta->rx_aes_blks_unicast);
+
+	PRINTK_LE16(STA, sta->aes_format_err_unicast_cnt);
+	PRINTK_LE16(STA, sta->aes_replay_unicast);
+
+	PRINTK_LE16(STA, sta->aes_decrypt_err_unicast);
+	PRINTK_LE16(STA, sta->aes_decrypt_err_default);
+
+	PRINTK_LE16(STA, sta->single_retry_packets);
+	PRINTK_LE16(STA, sta->failed_tx_packets);
+
+	PRINTK_LE16(STA, sta->muti_retry_packets);
+	PRINTK_LE16(STA, sta->ack_timeouts);
+
+	PRINTK_LE16(STA, sta->frag_tx_cnt);
+	PRINTK_LE16(STA, sta->rts_brq_sent);
+
+	PRINTK_LE16(STA, sta->tx_packets);
+	PRINTK_LE16(STA, sta->cts_back_timeout);
+
+	PRINTK_LE32(STA, sta->phy_stats_high);
+	PRINTK_LE32(STA, sta->phy_stats_low);
+
+//	for (i = 0; i < 8; i++)
+	agnx_print_sta_traffic(sta->traffic + 0);
+
+	PRINTK_LE16(STA, sta->traffic_class0_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class1_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class2_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class3_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class4_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class5_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class6_frag_success);
+	PRINTK_LE16(STA, sta->traffic_class7_frag_success);
+
+	PRINTK_LE16(STA, sta->num_frag_non_prime_rates);
+	PRINTK_LE16(STA, sta->ack_timeout_non_prime_rates);
+}
+
+
+static inline void dump_ieee80211_hdr(struct ieee80211_hdr *hdr, char *tag)
+{
+	u16 fctl;
+        int hdrlen;
+	DECLARE_MAC_BUF(mac);
+
+        fctl = le16_to_cpu(hdr->frame_control);
+	switch (fctl & IEEE80211_FCTL_FTYPE) {
+	case IEEE80211_FTYPE_DATA:
+		printk(PFX "%s DATA ", tag);
+		break;
+	case IEEE80211_FTYPE_CTL:
+		printk(PFX "%s CTL ", tag);
+		break;
+	case IEEE80211_FTYPE_MGMT:
+		printk(PFX "%s MGMT ", tag);
+		switch(fctl & IEEE80211_FCTL_STYPE) {
+		case IEEE80211_STYPE_ASSOC_REQ:
+			printk("SubType: ASSOC_REQ ");
+			break;
+		case IEEE80211_STYPE_ASSOC_RESP:
+			printk("SubType: ASSOC_RESP ");
+			break;
+		case IEEE80211_STYPE_REASSOC_REQ:
+			printk("SubType: REASSOC_REQ ");
+			break;
+		case IEEE80211_STYPE_REASSOC_RESP:
+			printk("SubType: REASSOC_RESP ");
+			break;
+		case IEEE80211_STYPE_PROBE_REQ:
+			printk("SubType: PROBE_REQ ");
+			break;
+		case IEEE80211_STYPE_PROBE_RESP:
+			printk("SubType: PROBE_RESP ");
+			break;
+		case IEEE80211_STYPE_BEACON:
+			printk("SubType: BEACON ");
+			break;
+		case IEEE80211_STYPE_ATIM:
+			printk("SubType: ATIM ");
+			break;
+		case IEEE80211_STYPE_DISASSOC:
+			printk("SubType: DISASSOC ");
+			break;
+		case IEEE80211_STYPE_AUTH:
+			printk("SubType: AUTH ");
+			break;
+		case IEEE80211_STYPE_DEAUTH:
+			printk("SubType: DEAUTH ");
+			break;
+		case IEEE80211_STYPE_ACTION:
+			printk("SubType: ACTION ");
+			break;
+		default:
+			printk("SubType: Unknown\n");
+		}
+		break;
+	default:
+		printk(PFX "%s Packet type: Unknown\n", tag);
+	}
+
+        hdrlen = ieee80211_hdrlen(fctl);
+
+	if (hdrlen >= 4)
+		printk("FC=0x%04x DUR=0x%04x",
+		       fctl, le16_to_cpu(hdr->duration_id));
+	if (hdrlen >= 10)
+		printk(" A1=%s", print_mac(mac, hdr->addr1));
+	if (hdrlen >= 16)
+		printk(" A2=%s", print_mac(mac, hdr->addr2));
+	if (hdrlen >= 24)
+		printk(" A3=%s", print_mac(mac, hdr->addr3));
+	if (hdrlen >= 30)
+		printk(" A4=%s", print_mac(mac, hdr->addr4));
+	printk("\n");
+}
+
+static inline void dump_txm_registers(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+	for (i = 0; i <=0x1e8; i += 4) {
+		printk(KERN_DEBUG PFX "TXM: %x---> 0x%.8x\n", i, ioread32(ctl + i));
+	}
+}
+static inline void dump_rxm_registers(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+	for (i = 0; i <=0x108; i += 4)
+		printk(KERN_DEBUG PFX "RXM: %x---> 0x%.8x\n", i, ioread32(ctl + 0x2000 + i));
+}
+static inline void dump_bm_registers(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+	for (i = 0; i <=0x90; i += 4)
+		printk(KERN_DEBUG PFX "BM: %x---> 0x%.8x\n", i, ioread32(ctl + 0x2c00 + i));
+}
+static inline void dump_cir_registers(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+	for (i = 0; i <=0xb8; i += 4)
+		printk(KERN_DEBUG PFX "CIR: %x---> 0x%.8x\n", i, ioread32(ctl + 0x3000 + i));
+}
+
+#endif /* AGNX_DEBUG_H_ */
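
The PRINTK_BITS macro above token-pastes its field argument with _SHIFT, so it only compiles when every field mask referenced here comes with a matching shift constant in phy.h, sta.h, or xmit.h. A sketch of the expected declaration pattern; the mask and shift values are illustrative, not the chip's real layout:

	/* hypothetical field declaration: a mask plus its matching _SHIFT */
	#define PACKET_LEN		0xFFF00000
	#define PACKET_LEN_SHIFT	20

	/* given a local `u32 reg`, PRINTK_BITS(DESC, PACKET_LEN) expands to:
	 *
	 *	printk(KERN_DEBUG PFX "DESC: PACKET_LEN: 0x%x\n",
	 *	       (reg & PACKET_LEN) >> PACKET_LEN_SHIFT);
	 */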

+ 644 - 0
drivers/staging/agnx/pci.c

@@ -0,0 +1,644 @@
+/**
+ * Airgo MIMO wireless driver
+ *
+ * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
+
+ * Thanks to Jeff Williams <angelbane@gmail.com> for the reverse-engineering
+ * work; the specs are published at http://airgo.wdwconsulting.net/mymoin
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "agnx.h"
+#include "debug.h"
+#include "xmit.h"
+#include "phy.h"
+
+MODULE_AUTHOR("Li YanBo <dreamfly281@gmail.com>");
+MODULE_DESCRIPTION("Airgo MIMO PCI wireless driver");
+MODULE_LICENSE("GPL");
+
+static struct pci_device_id agnx_pci_id_tbl[] __devinitdata = {
+	{ PCI_DEVICE(0x17cb, 0x0001) },	/* Belkin F5D8010, Netgear WGM511, etc. */
+	{ PCI_DEVICE(0x17cb, 0x0002) },	/* Netgear Wpnt511 */
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, agnx_pci_id_tbl);
+
+
+static inline void agnx_interrupt_ack(struct agnx_priv *priv, u32 *reason)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	if ( *reason & AGNX_STAT_RX ) {
+		/* Mark complete RX */
+		reg = ioread32(ctl + AGNX_CIR_RXCTL);
+		reg |= 0x4;
+		iowrite32(reg, ctl + AGNX_CIR_RXCTL);
+		/* disable Rx interrupt */
+	}
+	if ( *reason & AGNX_STAT_TX ) {
+		reg = ioread32(ctl + AGNX_CIR_TXDCTL);
+		if (reg & 0x4) {
+			iowrite32(reg, ctl + AGNX_CIR_TXDCTL);
+			*reason |= AGNX_STAT_TXD;
+		}
+ 		reg = ioread32(ctl + AGNX_CIR_TXMCTL);
+		if (reg & 0x4) {
+			iowrite32(reg, ctl + AGNX_CIR_TXMCTL);
+			*reason |= AGNX_STAT_TXM;
+		}
+	}
+	if ( *reason & AGNX_STAT_X ) {
+/* 		reg = ioread32(ctl + AGNX_INT_STAT); */
+/* 		iowrite32(reg, ctl + AGNX_INT_STAT); */
+/* 		/\* FIXME reinit interrupt mask *\/ */
+/* 		reg = 0xc390bf9 & ~IRQ_TX_BEACON; */
+/* 		reg &= ~IRQ_TX_DISABLE; */
+/* 		iowrite32(reg, ctl + AGNX_INT_MASK); */
+/* 		iowrite32(0x800, ctl + AGNX_CIR_BLKCTL); */
+	}
+} /* agnx_interrupt_ack */
+
+static irqreturn_t agnx_interrupt_handler(int irq, void *dev_id)
+{
+	struct ieee80211_hw *dev = dev_id;
+	struct agnx_priv *priv = dev->priv;
+	void __iomem *ctl = priv->ctl;
+	irqreturn_t ret = IRQ_NONE;
+	u32 irq_reason;
+
+	spin_lock(&priv->lock);
+
+//	printk(KERN_ERR PFX "Get a interrupt %s\n", __func__);
+
+	if (priv->init_status != AGNX_START)
+		goto out;
+
+	/* FIXME: no lock is held here; could this lead to a race? */
+	irq_reason = ioread32(ctl + AGNX_CIR_BLKCTL);
+	if (!(irq_reason & 0x7))
+		goto out;
+
+	ret = IRQ_HANDLED;
+	priv->irq_status = ioread32(ctl + AGNX_INT_STAT);
+
+//	printk(PFX "Interrupt reason is 0x%x\n", irq_reason);
+	/* Make sure the txm and txd flags don't conflict with other unknown
+	   interrupt flags; this may not be necessary */
+	irq_reason &= 0xF;
+
+	disable_rx_interrupt(priv);
+	/* TODO Make sure the card finished initialized */
+	agnx_interrupt_ack(priv, &irq_reason);
+
+	if ( irq_reason & AGNX_STAT_RX )
+		handle_rx_irq(priv);
+	if ( irq_reason & AGNX_STAT_TXD )
+		handle_txd_irq(priv);
+	if ( irq_reason & AGNX_STAT_TXM )
+		handle_txm_irq(priv);
+	if ( irq_reason & AGNX_STAT_X )
+		handle_other_irq(priv);
+
+	enable_rx_interrupt(priv);
+out:
+	spin_unlock(&priv->lock);
+	return ret;
+} /* agnx_interrupt_handler */
+
+
+/* FIXME */
+static int agnx_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+{
+	AGNX_TRACE;
+	return _agnx_tx(dev->priv, skb);
+} /* agnx_tx */
+
+
+static int agnx_get_mac_address(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	/* Attention! Directly reading the MAC or other data from the EEPROM
+	   will lock up the CardBus card (WGM511) when the PM PLL register is written */
+	reg = agnx_read32(ctl, 0x3544);
+	udelay(40);
+	reg = agnx_read32(ctl, 0x354c);
+	udelay(50);
+	/* Get the mac address */
+	reg = agnx_read32(ctl, 0x3544);
+	udelay(40);
+
+	/* HACK */
+	reg = cpu_to_le32(reg);
+	priv->mac_addr[0] = ((u8 *)&reg)[2];
+	priv->mac_addr[1] = ((u8 *)&reg)[3];
+	reg = agnx_read32(ctl, 0x3548);
+	udelay(50);
+	*((u32 *)(priv->mac_addr + 2)) = cpu_to_le32(reg);
+
+	if (!is_valid_ether_addr(priv->mac_addr)) {
+		DECLARE_MAC_BUF(mbuf);
+		printk(KERN_WARNING PFX "read mac %s\n", print_mac(mbuf, priv->mac_addr));
+		printk(KERN_WARNING PFX "Invalid hwaddr! Using random hwaddr\n");
+		random_ether_addr(priv->mac_addr);
+	}
+
+	return 0;
+} /* agnx_get_mac_address */
+
+static int agnx_alloc_rings(struct agnx_priv *priv)
+{
+	unsigned int len;
+	AGNX_TRACE;
+
+	/* Allocate RX/TXM/TXD rings info */
+	priv->rx.size = AGNX_RX_RING_SIZE;
+	priv->txm.size = AGNX_TXM_RING_SIZE;
+	priv->txd.size = AGNX_TXD_RING_SIZE;
+
+	len = priv->rx.size + priv->txm.size + priv->txd.size;
+
+//	priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_KERNEL);
+	priv->rx.info = kzalloc(sizeof(struct agnx_info) * len, GFP_ATOMIC);
+	if (!priv->rx.info)
+		return -ENOMEM;
+	priv->txm.info = priv->rx.info + priv->rx.size;
+	priv->txd.info = priv->txm.info + priv->txm.size;
+
+	/* Allocate RX/TXM/TXD descriptors */
+	priv->rx.desc = pci_alloc_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
+					     &priv->rx.dma);
+	if (!priv->rx.desc) {
+		kfree(priv->rx.info);
+		return -ENOMEM;
+	}
+
+	priv->txm.desc = priv->rx.desc + priv->rx.size;
+	priv->txm.dma = priv->rx.dma + sizeof(struct agnx_desc) * priv->rx.size;
+	priv->txd.desc = priv->txm.desc + priv->txm.size;
+	priv->txd.dma = priv->txm.dma + sizeof(struct agnx_desc) * priv->txm.size;
+
+	return 0;
+} /* agnx_alloc_rings */
+
+static void rings_free(struct agnx_priv *priv)
+{
+	unsigned int len = priv->rx.size + priv->txm.size + priv->txd.size;
+	unsigned long flags;
+	AGNX_TRACE;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	kfree(priv->rx.info);
+	pci_free_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
+			    priv->rx.desc, priv->rx.dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+#if 0
+static void agnx_periodic_work_handler(struct work_struct *work)
+{
+	struct agnx_priv *priv = container_of(work, struct agnx_priv,
+                                             periodic_work.work);
+//	unsigned long flags;
+	unsigned long delay;
+
+	/* fixme: using mutex?? */
+//	spin_lock_irqsave(&priv->lock, flags);
+
+	/* TODO Recalibrate*/
+//	calibrate_oscillator(priv);
+//	antenna_calibrate(priv);
+//	agnx_send_packet(priv, 997);
+	/* FIXME */
+/* 	if (debug == 3) */
+/*                 delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
+/* 	else */
+	delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY);
+//		delay = round_jiffies(HZ * 15);
+
+	queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay);
+
+//	spin_unlock_irqrestore(&priv->lock, flags);
+}
+#endif
+
+static int agnx_start(struct ieee80211_hw *dev)
+{
+	struct agnx_priv *priv = dev->priv;
+	/* unsigned long delay; */
+	int err = 0;
+	AGNX_TRACE;
+
+	err = agnx_alloc_rings(priv);
+	if (err) {
+		printk(KERN_ERR PFX "Can't alloc RX/TXM/TXD rings\n");
+		goto out;
+	}
+	err = request_irq(priv->pdev->irq, &agnx_interrupt_handler,
+			  IRQF_SHARED, "agnx_pci", dev);
+	if (err) {
+		printk(KERN_ERR PFX "Failed to register IRQ handler\n");
+		rings_free(priv);
+		goto out;
+	}
+
+//	mdelay(500);
+
+	might_sleep();
+	agnx_hw_init(priv);
+
+//	mdelay(500);
+	might_sleep();
+
+	priv->init_status = AGNX_START;
+/*         INIT_DELAYED_WORK(&priv->periodic_work, agnx_periodic_work_handler); */
+/* 	delay = msecs_to_jiffies(AGNX_PERIODIC_DELAY); */
+/*         queue_delayed_work(priv->hw->workqueue, &priv->periodic_work, delay); */
+out:
+	return err;
+} /* agnx_start */
+
+static void agnx_stop(struct ieee80211_hw *dev)
+{
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+
+	priv->init_status = AGNX_STOP;
+	/* make sure hardware will not generate irq */
+	agnx_hw_reset(priv);
+	free_irq(priv->pdev->irq, dev);
+        flush_workqueue(priv->hw->workqueue);
+//	cancel_delayed_work_sync(&priv->periodic_work);
+	unfill_rings(priv);
+	rings_free(priv);
+}
+
+static int agnx_config(struct ieee80211_hw *dev,
+		       struct ieee80211_conf *conf)
+{
+	struct agnx_priv *priv = dev->priv;
+	int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+	AGNX_TRACE;
+
+	spin_lock(&priv->lock);
+	/* FIXME need priv lock? */
+	if (channel != priv->channel) {
+		priv->channel = channel;
+		agnx_set_channel(priv, priv->channel);
+	}
+
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+static int agnx_config_interface(struct ieee80211_hw *dev,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_if_conf *conf)
+{
+	struct agnx_priv *priv = dev->priv;
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+	spin_lock(&priv->lock);
+
+	if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+//		u32 reghi, reglo;
+		agnx_set_bssid(priv, conf->bssid);
+		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
+		hash_write(priv, conf->bssid, BSSID_STAID);
+		sta_init(priv, BSSID_STAID);
+		/* FIXME needed? */
+		sta_power_init(priv, BSSID_STAID);
+		agnx_write32(ctl, AGNX_BM_MTSM, 0xff & ~0x1);
+	}
+	spin_unlock(&priv->lock);
+	return 0;
+} /* agnx_config_interface */
+
+
+static void agnx_configure_filter(struct ieee80211_hw *dev,
+				  unsigned int changed_flags,
+				  unsigned int *total_flags,
+				  int mc_count, struct dev_mc_list *mclist)
+{
+	unsigned int new_flags = 0;
+
+	*total_flags = new_flags;
+	/* TODO */
+}
+
+static int agnx_add_interface(struct ieee80211_hw *dev,
+			      struct ieee80211_if_init_conf *conf)
+{
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+
+	spin_lock(&priv->lock);
+	/* FIXME */
+	if (priv->mode != NL80211_IFTYPE_MONITOR)
+		return -EOPNOTSUPP;
+
+	switch (conf->type) {
+	case NL80211_IFTYPE_STATION:
+		priv->mode = conf->type;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	spin_unlock(&priv->lock);
+
+	return 0;
+}
+
+static void agnx_remove_interface(struct ieee80211_hw *dev,
+				  struct ieee80211_if_init_conf *conf)
+{
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+
+	/* TODO */
+	priv->mode = NL80211_IFTYPE_MONITOR;
+}
+
+static int agnx_get_stats(struct ieee80211_hw *dev,
+			  struct ieee80211_low_level_stats *stats)
+{
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+	spin_lock(&priv->lock);
+	/* TODO !! */
+	memcpy(stats, &priv->stats, sizeof(*stats));
+	spin_unlock(&priv->lock);
+
+	return 0;
+}
+
+static u64 agnx_get_tsft(struct ieee80211_hw *dev)
+{
+	void __iomem *ctl = ((struct agnx_priv *)dev->priv)->ctl;
+	u32 tsftl;
+	u64 tsft;
+	AGNX_TRACE;
+
+	/* FIXME */
+	tsftl = ioread32(ctl + AGNX_TXM_TIMESTAMPLO);
+	tsft = ioread32(ctl + AGNX_TXM_TIMESTAMPHI);
+	tsft <<= 32;
+	tsft |= tsftl;
+
+	return tsft;
+}
+
+static int agnx_get_tx_stats(struct ieee80211_hw *dev,
+			     struct ieee80211_tx_queue_stats *stats)
+{
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+
+	/* FIXME: for now we use only the txd queue, but we should use the txm queue too */
+	stats[0].len = (priv->txd.idx - priv->txd.idx_sent) / 2;
+	stats[0].limit = priv->txd.size - 2;
+	stats[0].count = priv->txd.idx / 2;
+
+	return 0;
+}
+
+static struct ieee80211_ops agnx_ops = {
+	.tx			= agnx_tx,
+	.start			= agnx_start,
+	.stop			= agnx_stop,
+	.add_interface		= agnx_add_interface,
+	.remove_interface	= agnx_remove_interface,
+	.config			= agnx_config,
+	.config_interface	= agnx_config_interface,
+ 	.configure_filter	= agnx_configure_filter,
+	.get_stats		= agnx_get_stats,
+	.get_tx_stats		= agnx_get_tx_stats,
+	.get_tsf		= agnx_get_tsft
+};
+
+static void __devexit agnx_pci_remove(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
+	struct agnx_priv *priv = dev->priv;
+	AGNX_TRACE;
+
+	if (!dev)
+		return;
+	ieee80211_unregister_hw(dev);
+	pci_iounmap(pdev, priv->ctl);
+	pci_iounmap(pdev, priv->data);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	ieee80211_free_hw(dev);
+}
+
+static int __devinit agnx_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id)
+{
+	struct ieee80211_hw *dev;
+	struct agnx_priv *priv;
+	u32 mem_addr0, mem_len0;
+	u32 mem_addr1, mem_len1;
+	int err;
+	DECLARE_MAC_BUF(mac);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR PFX "Can't enable new PCI device\n");
+		return err;
+	}
+
+	/* get pci resource */
+	mem_addr0 = pci_resource_start(pdev, 0);
+	mem_len0 = pci_resource_len(pdev, 0);
+	mem_addr1 = pci_resource_start(pdev, 1);
+	mem_len1 = pci_resource_len(pdev, 1);
+	printk(KERN_DEBUG PFX "Memaddr0 is %x, length is %x\n", mem_addr0, mem_len0);
+	printk(KERN_DEBUG PFX "Memaddr1 is %x, length is %x\n", mem_addr1, mem_len1);
+
+	err = pci_request_regions(pdev, "agnx-pci");
+	if (err) {
+		printk(KERN_ERR PFX "Can't obtain PCI resource\n");
+		return err;
+	}
+
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
+	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_ERR PFX "No suitable DMA available\n");
+		goto err_free_reg;
+	}
+
+	pci_set_master(pdev);
+	printk(KERN_DEBUG PFX "pdev->irq is %d\n", pdev->irq);
+
+	dev = ieee80211_alloc_hw(sizeof(*priv), &agnx_ops);
+	if (!dev) {
+		printk(KERN_ERR PFX "ieee80211 alloc failed\n");
+		err = -ENOMEM;
+		goto err_free_reg;
+	}
+	/* init priv  */
+	priv = dev->priv;
+	memset(priv, 0, sizeof(*priv));
+	priv->mode = NL80211_IFTYPE_MONITOR;
+	priv->pdev = pdev;
+	priv->hw = dev;
+	spin_lock_init(&priv->lock);
+	priv->init_status = AGNX_UNINIT;
+
+	/* Map mem #1 and #2 */
+	priv->ctl = pci_iomap(pdev, 0, mem_len0);
+//	printk(KERN_DEBUG PFX"MEM1 mapped address is 0x%p\n", priv->ctl);
+	if (!priv->ctl) {
+		printk(KERN_ERR PFX "Can't map device memory\n");
+		goto err_free_dev;
+	}
+	priv->data = pci_iomap(pdev, 1, mem_len1);
+	printk(KERN_DEBUG PFX "MEM2 mapped address is 0x%p\n", priv->data);
+	if (!priv->data) {
+		printk(KERN_ERR PFX "Can't map device memory\n");
+		goto err_iounmap2;
+	}
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &priv->revid);
+
+	priv->band.channels   = (struct ieee80211_channel *)agnx_channels;
+	priv->band.n_channels = ARRAY_SIZE(agnx_channels);
+	priv->band.bitrates   = (struct ieee80211_rate *)agnx_rates_80211g;
+	priv->band.n_bitrates = ARRAY_SIZE(agnx_rates_80211g);
+
+	/* Init ieee802.11 dev  */
+	SET_IEEE80211_DEV(dev, &pdev->dev);
+	pci_set_drvdata(pdev, dev);
+	dev->extra_tx_headroom = sizeof(struct agnx_hdr);
+
+	/* FIXME: the FCS is only included in promiscuous mode, not managed mode */
+/*      dev->flags =  IEEE80211_HW_RX_INCLUDES_FCS; */
+	dev->channel_change_time = 5000;
+	dev->max_signal = 100;
+	/* FIXME */
+	dev->queues = 1;
+
+	agnx_get_mac_address(priv);
+
+	SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);
+
+/* 	/\* FIXME *\/ */
+/* 	for (i = 1; i < NUM_DRIVE_MODES; i++) { */
+/* 		err = ieee80211_register_hwmode(dev, &priv->modes[i]); */
+/* 		if (err) { */
+/* 			printk(KERN_ERR PFX "Can't register hwmode\n"); */
+/* 			goto  err_iounmap; */
+/* 		} */
+/* 	} */
+
+	priv->channel = 1;
+	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+
+	err = ieee80211_register_hw(dev);
+	if (err) {
+		printk(KERN_ERR PFX "Can't register hardware\n");
+		goto err_iounmap;
+	}
+
+	agnx_hw_reset(priv);
+
+
+	printk(PFX "%s: hwaddr %s, Rev 0x%02x\n", wiphy_name(dev->wiphy),
+	       print_mac(mac, dev->wiphy->perm_addr), priv->revid);
+	return 0;
+
+ err_iounmap:
+	pci_iounmap(pdev, priv->data);
+
+ err_iounmap2:
+	pci_iounmap(pdev, priv->ctl);
+
+ err_free_dev:
+	pci_set_drvdata(pdev, NULL);
+	ieee80211_free_hw(dev);
+
+ err_free_reg:
+	pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	return err;
+} /* agnx_pci_probe*/
+
+#ifdef CONFIG_PM
+
+static int agnx_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
+	AGNX_TRACE;
+
+	ieee80211_stop_queues(dev);
+	agnx_stop(dev);
+
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int agnx_pci_resume(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
+	AGNX_TRACE;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	agnx_start(dev);
+	ieee80211_wake_queues(dev);
+
+	return 0;
+}
+
+#else
+
+#define agnx_pci_suspend NULL
+#define agnx_pci_resume NULL
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver agnx_pci_driver = {
+	.name		= "agnx-pci",
+	.id_table	= agnx_pci_id_tbl,
+	.probe		= agnx_pci_probe,
+	.remove		= __devexit_p(agnx_pci_remove),
+	.suspend	= agnx_pci_suspend,
+	.resume		= agnx_pci_resume,
+};
+
+static int __init agnx_pci_init(void)
+{
+	AGNX_TRACE;
+	return pci_register_driver(&agnx_pci_driver);
+}
+
+static void __exit agnx_pci_exit(void)
+{
+	AGNX_TRACE;
+	pci_unregister_driver(&agnx_pci_driver);
+}
+
+
+module_init(agnx_pci_init);
+module_exit(agnx_pci_exit);
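
One detail worth noting: agnx_get_tsft() above reads the low timestamp word before the high one, so the 64-bit value can tear when the low word rolls over between the two reads. A common rollover-safe pattern, sketched here with placeholder offsets rather than the real AGNX register map:

	/* sketch only: reread the high half until it is stable across the
	 * low-half read; tsf_hi/tsf_lo are placeholders, not AGNX registers */
	static u64 read_tsf_stable(void __iomem *ctl, u32 tsf_hi, u32 tsf_lo)
	{
		u32 hi, lo, hi2;

		do {
			hi  = ioread32(ctl + tsf_hi);
			lo  = ioread32(ctl + tsf_lo);
			hi2 = ioread32(ctl + tsf_hi);
		} while (hi != hi2);	/* the low word wrapped: retry */

		return ((u64)hi << 32) | lo;
	}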

+ 960 - 0
drivers/staging/agnx/phy.c

@@ -0,0 +1,960 @@
+/**
+ * Airgo MIMO wireless driver
+ *
+ * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
+
+ * Thanks to Jeff Williams <angelbane@gmail.com> for the reverse-engineering
+ * work; the specs are published at http://airgo.wdwconsulting.net/mymoin
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "agnx.h"
+#include "debug.h"
+#include "phy.h"
+#include "table.h"
+#include "sta.h"
+#include "xmit.h"
+
+u8 read_from_eeprom(struct agnx_priv *priv, u16 address)
+{
+	void __iomem *ctl = priv->ctl;
+	struct agnx_eeprom cmd;
+	u32 reg;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd = EEPROM_CMD_READ << AGNX_EEPROM_COMMAND_SHIFT;
+	cmd.address = address;
+	/* Verify that the Status bit is clear */
+	/* Read Command and Address are written to the Serial Interface */
+	iowrite32(*(__le32 *)&cmd, ctl + AGNX_CIR_SERIALITF);
+	/* Wait for the Status bit to clear again */
+	eeprom_delay();
+	/* Read from Data */
+	reg = ioread32(ctl + AGNX_CIR_SERIALITF);
+
+	cmd = *(struct agnx_eeprom *)&reg;
+
+	return cmd.data;
+}
+
+static int card_full_reset(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
+	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x80);
+	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
+	return 0;
+}
+
+inline void enable_power_saving(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg &= ~0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+}
+
+inline void disable_power_saving(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+}
+
+
+void disable_receiver(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+	/* FIXME Disable the receiver */
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);
+	/* Set gain control reset */
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
+	/* Reset gain control reset */
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
+}
+
+
+/* FIXME: this should be the disable-RX path, and the one above the enable-RX path */
+void enable_receiver(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+	/* Set adaptive gain control discovery mode */
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+	/* Set gain control reset */
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
+	/* Clear gain control reset */
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
+}
+
+static void mac_address_set(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u8 *mac_addr = priv->mac_addr;
+	u32 reg;
+
+	/* FIXME */
+	reg = (mac_addr[0] << 24) | (mac_addr[1] << 16) | mac_addr[2] << 8 | mac_addr[3];
+	iowrite32(reg, ctl + AGNX_RXM_MACHI);
+ 	reg = (mac_addr[4] << 8) | mac_addr[5];
+	iowrite32(reg, ctl + AGNX_RXM_MACLO);
+}
+
+static void receiver_bssid_set(struct agnx_priv *priv, u8 *bssid)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	disable_receiver(priv);
+	/* FIXME */
+	reg = bssid[0] << 24 | (bssid[1] << 16) | (bssid[2] << 8) | bssid[3];
+	iowrite32(reg, ctl + AGNX_RXM_BSSIDHI);
+ 	reg = (bssid[4] << 8) | bssid[5];
+	iowrite32(reg, ctl + AGNX_RXM_BSSIDLO);
+
+	/* Enable the receiver */
+	enable_receiver(priv);
+
+	/* Clear the TSF */
+/* 	agnx_write32(ctl, AGNX_TXM_TSFLO, 0x0); */
+/* 	agnx_write32(ctl, AGNX_TXM_TSFHI, 0x0); */
+	/* Clear the TBTT */
+	agnx_write32(ctl, AGNX_TXM_TBTTLO, 0x0);
+	agnx_write32(ctl, AGNX_TXM_TBTTHI, 0x0);
+	disable_receiver(priv);
+} /* receiver_bssid_set */
+
+static void band_management_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	void __iomem *data = priv->data;
+	u32 reg;
+	int i;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_BM_TXWADDR, AGNX_PDU_TX_WQ);
+	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);
+	memset_io(data + AGNX_PDUPOOL, 0x0, AGNX_PDUPOOL_SIZE);
+	agnx_write32(ctl, AGNX_BM_BMCTL, 0x200);
+
+	agnx_write32(ctl, AGNX_BM_CIPDUWCNT, 0x40);
+	agnx_write32(ctl, AGNX_BM_SPPDUWCNT, 0x2);
+	agnx_write32(ctl, AGNX_BM_RFPPDUWCNT, 0x0);
+	agnx_write32(ctl, AGNX_BM_RHPPDUWCNT, 0x22);
+
+	/* FIXME Initialize the Free Pool Linked List */
+	/*    1. Write the Address of the Next Node ((0x41800 + node*size)/size)
+	      to the first word of each node.  */
+	for (i = 0; i < PDU_FREE_CNT; i++) {
+		iowrite32((AGNX_PDU_FREE + (i+1)*PDU_SIZE)/PDU_SIZE,
+			  data + AGNX_PDU_FREE + (PDU_SIZE * i));
+		/* The last node should be set to 0x0 */
+		if ((i + 1) == PDU_FREE_CNT)
+			memset_io(data + AGNX_PDU_FREE + (PDU_SIZE * i),
+				  0x0, PDU_SIZE);
+	}
+
+	/* Head is First Pool address (0x41800) / size (0x80) */
+	agnx_write32(ctl, AGNX_BM_FPLHP, AGNX_PDU_FREE/PDU_SIZE);
+	/* Tail is Last Pool Address (0x47f80) / size (0x80) */
+	agnx_write32(ctl, AGNX_BM_FPLTP, 0x47f80/PDU_SIZE);
+	/* Count is Number of Nodes in the Pool (0xd0) */
+	agnx_write32(ctl, AGNX_BM_FPCNT, PDU_FREE_CNT);
+
+	/* Start all workqueue */
+	agnx_write32(ctl, AGNX_BM_CIWQCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_CPULWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_CPUHWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_CPUTXWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_CPURXWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_SPRXWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_SPTXWCTL, 0x80000);
+	agnx_write32(ctl, AGNX_BM_RFPWCTL, 0x80000);
+
+	/* Enable the Band Management */
+	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
+} /* band_management_init */
+
+
+static void system_itf_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x0);
+	agnx_write32(ctl, AGNX_PM_TESTPHY, 0x11e143a);
+
+	if (priv->revid == 0) {
+		reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
+		reg |= 0x11;
+		agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
+	}
+	/* ??? What does that mean? It should differ for different types
+	   of cards */
+	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0xfff81006);
+
+	agnx_write32(ctl, AGNX_SYSITF_GPIOIN, 0x1f0000);
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
+	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
+}
+
+static void encryption_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_ENCRY_WEPKEY0, 0x0);
+	agnx_write32(ctl, AGNX_ENCRY_WEPKEY1, 0x0);
+	agnx_write32(ctl, AGNX_ENCRY_WEPKEY2, 0x0);
+	agnx_write32(ctl, AGNX_ENCRY_WEPKEY3, 0x0);
+	agnx_write32(ctl, AGNX_ENCRY_CCMRECTL, 0x8);
+}
+
+static void tx_management_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	void __iomem *data = priv->data;
+	u32 reg;
+	AGNX_TRACE;
+
+	/* Fill out the ComputationalEngineLookupTable
+	 * starting at memory #2 offset 0x800
+	 */
+	tx_engine_lookup_tbl_init(priv);
+	memset_io(data + 0x1000, 0, 0xfe0);
+	/* Enable Transmission Management Functions */
+	agnx_write32(ctl, AGNX_TXM_ETMF, 0x3ff);
+	/* Write 0x3f to Transmission Template */
+	agnx_write32(ctl, AGNX_TXM_TXTEMP, 0x3f);
+
+	if (priv->revid >= 2)
+		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e140a0b);
+	else
+		agnx_write32(ctl, AGNX_TXM_SIFSPIFS, 0x1e190a0b);
+
+	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
+	reg &= 0xff00;
+	reg |= 0xb;
+	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
+	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
+	reg &= 0xffff00ff;
+	reg |= 0xa00;
+	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
+	/* Enable TIFS */
+	agnx_write32(ctl, AGNX_TXM_CTL, 0x40000);
+
+	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
+	reg &= 0xff00ffff;
+	reg |= 0x510000;
+	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
+	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
+	reg &= 0xff00ffff;
+	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
+	reg = agnx_read32(ctl, AGNX_TXM_TIFSEIFS);
+	reg &= 0x00ffffff;
+	reg |= 0x1c000000;
+	agnx_write32(ctl, AGNX_TXM_TIFSEIFS, reg);
+	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
+	reg &= 0x00ffffff;
+	reg |= 0x01000000;
+	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
+
+	/* # Set DIF 0-1,2-3,4-5,6-7 to defaults */
+	agnx_write32(ctl, AGNX_TXM_DIF01, 0x321d321d);
+	agnx_write32(ctl, AGNX_TXM_DIF23, 0x321d321d);
+	agnx_write32(ctl, AGNX_TXM_DIF45, 0x321d321d);
+	agnx_write32(ctl, AGNX_TXM_DIF67, 0x321d321d);
+
+	/* Max Ack timeout limit */
+	agnx_write32(ctl, AGNX_TXM_MAXACKTIM, 0x1e19);
+	/* Max RX Data Timeout count, */
+	reg = agnx_read32(ctl, AGNX_TXM_MAXRXTIME);
+	reg &= 0xffff0000;
+	reg |= 0xff;
+	agnx_write32(ctl, AGNX_TXM_MAXRXTIME, reg);
+
+	/* CF poll RX Timeout count */
+	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
+	reg &= 0xffff;
+	reg |= 0xff0000;
+	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
+
+	/* Max Timeout Exceeded count, */
+	reg = agnx_read32(ctl, AGNX_TXM_MAXTIMOUT);
+	reg &= 0xff00ffff;
+	reg |= 0x190000;
+	agnx_write32(ctl, AGNX_TXM_MAXTIMOUT, reg);
+
+	/* CF ack timeout limit for 11b */
+	reg = agnx_read32(ctl, AGNX_TXM_CFACKT11B);
+	reg &= 0xff00;
+	reg |= 0x1e;
+	agnx_write32(ctl, AGNX_TXM_CFACKT11B, reg);
+
+	/* Max CF Poll Timeout Count */
+	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
+	reg &= 0xffff0000;
+	reg |= 0x19;
+	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
+	/* CF Poll RX Timeout Count */
+	reg = agnx_read32(ctl, AGNX_TXM_CFPOLLRXTIM);
+	reg &= 0xffff0000;
+	reg |= 0x1e;
+	agnx_write32(ctl, AGNX_TXM_CFPOLLRXTIM, reg);
+
+	/* # write default to */
+	/*    1. Schedule Empty Count */
+	agnx_write32(ctl, AGNX_TXM_SCHEMPCNT, 0x5);
+	/*    2. CFP Period Count */
+	agnx_write32(ctl, AGNX_TXM_CFPERCNT, 0x1);
+	/*    3. CFP MDV  */
+	agnx_write32(ctl, AGNX_TXM_CFPMDV, 0x10000);
+
+	/* Probe Delay */
+	reg = agnx_read32(ctl, AGNX_TXM_PROBDELAY);
+	reg &= 0xffff0000;
+	reg |= 0x400;
+	agnx_write32(ctl, AGNX_TXM_PROBDELAY, reg);
+
+	/* Max CCA count Slot */
+	reg = agnx_read32(ctl, AGNX_TXM_MAXCCACNTSLOT);
+	reg &= 0xffff00ff;
+	reg |= 0x900;
+	agnx_write32(ctl, AGNX_TXM_MAXCCACNTSLOT, reg);
+
+	/* Slot limit/1 msec Limit */
+	reg = agnx_read32(ctl, AGNX_TXM_SLOTLIMIT);
+	reg &= 0xff00ffff;
+	reg |= 0x140077;
+	agnx_write32(ctl, AGNX_TXM_SLOTLIMIT, reg);
+
+	/* # Set CW #(0-7) to default */
+	agnx_write32(ctl, AGNX_TXM_CW0, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW1, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW2, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW3, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW4, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW5, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW6, 0xff0007);
+	agnx_write32(ctl, AGNX_TXM_CW7, 0xff0007);
+
+	/* # Set Short/Long limit #(0-7) to default */
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM0,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM1,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM2,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM3,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM4,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM5,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM6,  0xa000a);
+	agnx_write32(ctl, AGNX_TXM_SLBEALIM7,  0xa000a);
+
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+	reg |= 0x1400;
+	agnx_write32(ctl, AGNX_TXM_CTL, reg);
+	/* Wait for bit 0 in Control Reg to clear  */
+	udelay(80);
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+	/* Or 0x18000 to Control reg */
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+	reg |= 0x18000;
+	agnx_write32(ctl, AGNX_TXM_CTL, reg);
+	/* Wait for bit 0 in Control Reg to clear */
+	udelay(80);
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+
+	/* Set Listen Interval Count to default */
+	agnx_write32(ctl, AGNX_TXM_LISINTERCNT, 0x1);
+	/* Set DTIM period count to default */
+	agnx_write32(ctl, AGNX_TXM_DTIMPERICNT, 0x2000);
+} /* tx_management_init */
+
+static void rx_management_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+	/* Initialize the Routing Table */
+	routing_table_init(priv);
+
+	if (priv->revid >= 3) {
+		agnx_write32(ctl, 0x2074, 0x1f171710);
+		agnx_write32(ctl, 0x2078, 0x10100d0d);
+		agnx_write32(ctl, 0x207c, 0x11111010);
+	}
+	else
+		agnx_write32(ctl, AGNX_RXM_DELAY11, 0x0);
+	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x8195e00);
+}
+
+
+static void agnx_timer_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	AGNX_TRACE;
+
+/* 	/\* Write 0x249f00 (tick duration?) to Timer 1 *\/ */
+/* 	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x249f00); */
+/* 	/\* Write 0xe2 to Timer 1 Control *\/ */
+/* 	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0xe2); */
+
+	/* Write 0x249f00 (tick duration?) to Timer 1 */
+	agnx_write32(ctl, AGNX_TIMCTL_TIMER1, 0x0);
+	/* Write 0xe2 to Timer 1 Control */
+	agnx_write32(ctl, AGNX_TIMCTL_TIM1CTL, 0x0);
+
+	iowrite32(0xFFFFFFFF, priv->ctl + AGNX_TXM_BEACON_CTL);
+}
+
+static void power_manage_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_PM_MACMSW, 0x1f);
+	agnx_write32(ctl, AGNX_PM_RFCTL, 0x1f);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg &= 0xf00f;
+	reg |= 0xa0;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+
+	if (priv->revid >= 3) {
+		reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
+		reg |= 0x18;
+		agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
+	}
+} /* power_manage_init */
+
+
+static void gain_ctlcnt_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_GCR_TRACNT5, 0x119);
+	agnx_write32(ctl, AGNX_GCR_TRACNT6, 0x118);
+	agnx_write32(ctl, AGNX_GCR_TRACNT7, 0x117);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg &= ~0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+
+	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x0);
+
+	/* FIXME Write the initial Station Descriptor for the card */
+	sta_init(priv, LOCAL_STAID);
+	sta_init(priv, BSSID_STAID);
+
+	/* Enable TX for stations 0 and 1 */
+	/* It seems that setting any other bit to 1 automatically
+	   clears bit 0 */
+	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x2 | 0x1);
+//	agnx_write32(ctl, AGNX_BM_TXTOPEER, 0x1);
+} /* gain_ctlcnt_init */
+
+
+static void phy_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	void __iomem *data = priv->data;
+	u32 reg;
+	AGNX_TRACE;
+
+	/* Load InitialGainTable */
+	gain_table_init(priv);
+
+	agnx_write32(ctl, AGNX_CIR_ADDRWIN, 0x2000000);
+
+	/* Clear the following offsets in Memory Range #2: */
+	memset_io(data + 0x5040, 0, 0xa * 4);
+	memset_io(data + 0x5080, 0, 0xa * 4);
+	memset_io(data + 0x50c0, 0, 0xa * 4);
+	memset_io(data + 0x5400, 0, 0x80 * 4);
+	memset_io(data + 0x6000, 0, 0x280 * 4);
+	memset_io(data + 0x7000, 0, 0x280 * 4);
+	memset_io(data + 0x8000, 0, 0x280 * 4);
+
+	/* Initialize the Following Registers According to PCI Revision ID */
+	if (priv->revid == 0) {
+		/* FIXME: this part hasn't been updated, but the block below
+		   has been updated based on the WGM511 */
+		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
+		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x1d);
+		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x3);
+		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
+		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
+		agnx_write32(ctl, AGNX_GCR_THD0AL, 0x4b);
+		agnx_write32(ctl, AGNX_GCR_THD0B, 0x4b);
+		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
+		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
+		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
+		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
+		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
+		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
+		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
+		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
+		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
+		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
+		reg = agnx_read32(ctl, AGNX_GCR_CWDETEC);
+		reg |= 0x1;
+		agnx_write32(ctl, AGNX_GCR_CWDETEC, reg);
+		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
+		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26);
+		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+		agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
+		agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
+		agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
+		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);
+		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
+		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1);
+		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0x1);
+		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
+		agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x78);
+		agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x1c);
+		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
+		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x1);
+		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
+		agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);
+		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x14);
+		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x30);
+		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x32);
+		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
+		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
+		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
+		agnx_write32(ctl, 0x9400, 0x0);
+		agnx_write32(ctl, 0x940c, 0x6ff);
+		agnx_write32(ctl, 0x9428, 0xa0);
+		agnx_write32(ctl, 0x9434, 0x0);
+		agnx_write32(ctl, 0x9c04, 0x15);
+		agnx_write32(ctl, 0x9c0c, 0x7f);
+		agnx_write32(ctl, 0x9c34, 0x0);
+		agnx_write32(ctl, 0xc000, 0x38d);
+		agnx_write32(ctl, 0x14018, 0x0);
+		agnx_write32(ctl, 0x16000, 0x1);
+		agnx_write32(ctl, 0x11004, 0x0);
+		agnx_write32(ctl, 0xec54, 0xa);
+		agnx_write32(ctl, 0xec1c, 0x5);
+	} else if (priv->revid > 0) {
+		agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
+		agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
+		agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+		agnx_write32(ctl, AGNX_ACI_AICCHA0OVE, 0x11);
+		agnx_write32(ctl, AGNX_ACI_AICCHA1OVE, 0x0);
+		agnx_write32(ctl, AGNX_GCR_DUNSAT, 0x14);
+		agnx_write32(ctl, AGNX_GCR_DSAT, 0x24);
+		agnx_write32(ctl, AGNX_GCR_DFIRCAL, 0x8);
+		agnx_write32(ctl, AGNX_GCR_DGCTL11A, 0x1a);
+		agnx_write32(ctl, AGNX_GCR_DGCTL11B, 0x3);
+		agnx_write32(ctl, AGNX_GCR_GAININIT, 0xd);
+		agnx_write32(ctl, AGNX_GCR_THNOSIG, 0x1);
+		agnx_write32(ctl, AGNX_GCR_COARSTEP, 0x7);
+		agnx_write32(ctl, AGNX_GCR_SIFST11A, 0x28);
+		agnx_write32(ctl, AGNX_GCR_SIFST11B, 0x28);
+		agnx_write32(ctl, AGNX_GCR_CWDETEC, 0x0);
+		agnx_write32(ctl, AGNX_GCR_0X38, 0x1e);
+//		agnx_write32(ctl, AGNX_GCR_BOACT, 0x26);
+		agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+
+		agnx_write32(ctl, AGNX_GCR_THCAP11A, 0x32);
+		agnx_write32(ctl, AGNX_GCR_THCAP11B, 0x32);
+		agnx_write32(ctl, AGNX_GCR_THCAPRX11A, 0x32);
+		agnx_write32(ctl, AGNX_GCR_THCAPRX11B, 0x32);
+		agnx_write32(ctl, AGNX_GCR_THLEVDRO, 0x10);
+		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11A, 0x1ad);
+		agnx_write32(ctl, AGNX_GCR_MAXRXTIME11B, 0xa10);
+		agnx_write32(ctl, AGNX_GCR_CORRTIME, 0x190);
+		agnx_write32(ctl, AGNX_GCR_CORRDROP, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THCS, 0x0);
+		agnx_write32(ctl, AGNX_GCR_MAXPOWDIFF, 0x4);
+		agnx_write32(ctl, AGNX_GCR_TESTBUS, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THJUMP, 0x1e);
+		agnx_write32(ctl, AGNX_GCR_THPOWER, 0x0);
+		agnx_write32(ctl, AGNX_GCR_THPOWCLIP, 0x2a);
+		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);
+		agnx_write32(ctl, AGNX_GCR_THRX11BPOWMIN, 0x19);
+		agnx_write32(ctl, AGNX_GCR_0X14c, 0x0);
+		agnx_write32(ctl, AGNX_GCR_0X150, 0x0);
+		agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+		agnx_write32(ctl, AGNX_GCR_WATCHDOG, 0x37);
+		agnx_write32(ctl, 0x9400, 0x0);
+		agnx_write32(ctl, 0x940c, 0x6ff);
+		agnx_write32(ctl, 0x9428, 0xa0);
+		agnx_write32(ctl, 0x9434, 0x0);
+		agnx_write32(ctl, 0x9c04, 0x15);
+		agnx_write32(ctl, 0x9c0c, 0x7f);
+		agnx_write32(ctl, 0x9c34, 0x0);
+		agnx_write32(ctl, 0xc000, 0x38d);
+		agnx_write32(ctl, 0x14014, 0x1000);
+		agnx_write32(ctl, 0x14018, 0x0);
+		agnx_write32(ctl, 0x16000, 0x1);
+		agnx_write32(ctl, 0x11004, 0x0);
+		agnx_write32(ctl, 0xec54, 0xa);
+		agnx_write32(ctl, 0xec1c, 0x50);
+	} else if (priv->revid > 1) {
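+		/* FIXME: this branch is unreachable as written -- any
+		   revid > 1 also matches the (revid > 0) branch above */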
+		reg = agnx_read32(ctl, 0xec18);
+		reg |= 0x8;
+		agnx_write32(ctl, 0xec18, reg);
+	}
+
+	/* Write the TX Fir Coefficient Table */
+	tx_fir_table_init(priv);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg &= ~0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);
+
+/* 	reg = agnx_read32(ctl, 0x1a030); */
+/* 	reg &= ~0x4; */
+/* 	agnx_write32(ctl, 0x1a030, reg); */
+
+	agnx_write32(ctl, AGNX_GCR_TRACNT4, 0x113);
+} /* phy_init */
+
+static void chip_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	band_management_init(priv);
+
+	rf_chips_init(priv);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+
+	/* Initialize the PHY */
+	phy_init(priv);
+
+	encryption_init(priv);
+
+	tx_management_init(priv);
+
+	rx_management_init(priv);
+
+	power_manage_init(priv);
+
+	/* Initialize the Timers */
+	agnx_timer_init(priv);
+
+	/* Write 0xc390bf9 to Interrupt Mask (Disable TX) */
+	reg = 0xc390bf9 & ~IRQ_TX_BEACON;
+	reg &= ~IRQ_TX_DISABLE;
+	agnx_write32(ctl, AGNX_INT_MASK, reg);
+
+	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
+	reg |= 0x800;
+	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);
+
+	/* FIXME: set this when multicast receive needs to be enabled? */
+	agnx_write32(ctl, AGNX_BM_MTSM, 0xff);
+} /* chip_init */
+
+
+static inline void set_promis_and_managed(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
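+	/* FIXME: the same value is written twice; it is unclear whether
+	   the hardware actually requires the double write */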
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10 | 0x2);
+}
+static inline void set_learn_mode(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x8);
+}
+static inline void set_scan_mode(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x20);
+}
+static inline void set_promiscuous_mode(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	/* agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x210);*/
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x10);
+}
+static inline void set_managed_mode(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x2);
+}
+static inline void set_adhoc_mode(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, 0x0);
+}
+
+#if 0
+static void unknown_register_write(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x0, 0x3e);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4, 0xb2);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x8, 0x140);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0xc, 0x1C0);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x10, 0x1FF);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x14, 0x1DD);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x18, 0x15F);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x1c, 0xA1);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x20, 0x3E7);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x24, 0x36B);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x28, 0x348);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x2c, 0x37D);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x30, 0x3DE);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x34, 0x36);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x38, 0x64);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x3c, 0x57);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x40, 0x23);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x44, 0x3ED);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x48, 0x3C9);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x4c, 0x3CA);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x50, 0x3E7);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x54, 0x8);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x58, 0x1F);
+	agnx_write32(ctl, AGNX_UNKNOWN_BASE + 0x5c, 0x1a);
+}
+#endif
+
+static void card_interface_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u8 bssid[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
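+	/* broadcast BSSID by default; agnx_set_bssid() installs the real
+	   one later */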
+	u32 reg;
+	unsigned int i;
+	AGNX_TRACE;
+
+	might_sleep();
+	/* Clear RX Control and Enable RX queues */
+	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x8);
+
+	might_sleep();
+	/* Do a full reset of the card */
+	card_full_reset(priv);
+	might_sleep();
+
+	/* Check and set Card Endianness */
+	reg = ioread32(priv->ctl + AGNX_CIR_ENDIAN);
+	/* TODO If not 0xB3B2B1B0 set to 0xB3B2B1B0 */
+	printk(KERN_INFO PFX "CIR_ENDIAN is %x\n", reg);
+
+
+	/* Config the eeprom */
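+	/* FIXME: 0x7000086 is a magic value; possibly EEPROM_CMD_CONFIGURE
+	   plus configure data, cf. the eeprom command defines in phy.h */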
+	agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x7000086);
+	udelay(10);
+	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
+
+
+	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
+	reg = agnx_read32(ctl, 0xec50);
+	reg |= 0xf;
+	agnx_write32(ctl, 0xec50, reg);
+	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);
+
+
+	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOIN);
+	udelay(10);
+	reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
+
+	/* Dump the eeprom */
+	do {
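+		/* FIXME: 0x1000-byte buffer on the kernel stack; a
+		   kmalloc'ed buffer would be safer */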
+		char eeprom[0x100000/0x100];
+
+		for (i = 0; i < 0x100000; i += 0x100) {
+			agnx_write32(ctl, AGNX_CIR_SERIALITF, 0x3000000 + i);
+			udelay(13);
+			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
+			udelay(70);
+			reg = agnx_read32(ctl, AGNX_CIR_SERIALITF);
+			eeprom[i/0x100] = reg & 0xFF;
+			udelay(10);
+		}
+		print_hex_dump_bytes(PFX "EEPROM: ", DUMP_PREFIX_NONE, eeprom,
+				     ARRAY_SIZE(eeprom));
+	} while (0);
+
+	spi_rc_write(ctl, RF_CHIP0, 0x26);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+
+	/* Initialize the system interface */
+	system_itf_init(priv);
+
+	might_sleep();
+	/* Chip Initialization (Polaris) */
+	chip_init(priv);
+	might_sleep();
+
+	/* Calibrate the antennae */
+	antenna_calibrate(priv);
+
+	reg = agnx_read32(ctl, 0xec50);
+	reg &= ~0x40;
+	agnx_write32(ctl, 0xec50, reg);
+	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);
+	agnx_write32(ctl, AGNX_PM_PLLCTL, 0x1);
+
+	reg = agnx_read32(ctl, AGNX_BM_BMCTL);
+	reg |= 0x8000;
+	agnx_write32(ctl, AGNX_BM_BMCTL, reg);
+	enable_receiver(priv);
+	reg = agnx_read32(ctl, AGNX_SYSITF_SYSMODE);
+	reg |= 0x200;
+	agnx_write32(ctl, AGNX_SYSITF_SYSMODE, reg);
+	enable_receiver(priv);
+
+	might_sleep();
+	/* Initialize Gain Control Counts */
+	gain_ctlcnt_init(priv);
+
+	/* Write Initial Station Power Template for this station(#0) */
+	sta_power_init(priv, LOCAL_STAID);
+
+	might_sleep();
+	/* Initialize the RX, TXD and TXM rings, for each node in the ring */
+	fill_rings(priv);
+
+	might_sleep();
+
+
+	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x80000033);
+	agnx_write32(ctl, 0xec50, 0xc);
+	agnx_write32(ctl, AGNX_PM_SOFTRST, 0x0);
+
+	/* FIXME Initialize the transmit control register */
+	agnx_write32(ctl, AGNX_TXM_CTL, 0x194c1);
+
+	enable_receiver(priv);
+
+	might_sleep();
+	/* FIXME Set the Receive Control Mac Address to card address */
+	mac_address_set(priv);
+	enable_receiver(priv);
+	might_sleep();
+
+	/* Set the receive request rate */
+	/* FIXME Enable the request */
+	/* Check packet length */
+	/* Set maximum packet length */
+/* 	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x88195e00); */
+/* 	enable_receiver(priv); */
+
+	/* Set the Receiver BSSID */
+	receiver_bssid_set(priv, bssid);
+
+	/* FIXME Set to managed mode */
+	set_managed_mode(priv);
+//	set_promiscuous_mode(priv);
+/* 	set_scan_mode(priv); */
+/* 	set_learn_mode(priv); */
+// 	set_promis_and_managed(priv);
+// 	set_adhoc_mode(priv);
+
+	/* Set the receive request rate */
+	/* Check packet length */
+	agnx_write32(ctl, AGNX_RXM_REQRATE, 0x08000000);
+	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
+	/* Set maximum packet length */
+	reg |= 0x00195e00;
+	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);
+
+	/* Configure the RX and TX interrupt */
+	reg = ENABLE_RX_INTERRUPT | RX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
+	agnx_write32(ctl, AGNX_CIR_RXCFG, reg);
+	/* FIXME */
+	reg = ENABLE_TX_INTERRUPT | TX_CACHE_LINE | FRAG_LEN_2048 | FRAG_BE;
+	agnx_write32(ctl, AGNX_CIR_TXCFG, reg);
+
+	/* Enable RX TX Interrupts */
+	agnx_write32(ctl, AGNX_CIR_RXCTL, 0x80);
+	agnx_write32(ctl, AGNX_CIR_TXMCTL, 0x80);
+	agnx_write32(ctl, AGNX_CIR_TXDCTL, 0x80);
+
+	/* FIXME Set the master control interrupt in block control */
+	agnx_write32(ctl, AGNX_CIR_BLKCTL, 0x800);
+
+	/* Enable RX and TX queues */
+	reg = agnx_read32(ctl, AGNX_CIR_RXCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_CIR_RXCTL, reg);
+	reg = agnx_read32(ctl, AGNX_CIR_TXMCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_CIR_TXMCTL, reg);
+	reg = agnx_read32(ctl, AGNX_CIR_TXDCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_CIR_TXDCTL, reg);
+
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
+	/* FIXME */
+	/*  unknown_register_write(priv); */
+	/* Update local card hash entry */
+	hash_write(priv, priv->mac_addr, LOCAL_STAID);
+
+	might_sleep();
+
+	/* FIXME */
+	agnx_set_channel(priv, 1);
+	might_sleep();
+} /* card_interface_init */
+
+
+void agnx_hw_init(struct agnx_priv *priv)
+{
+	AGNX_TRACE;
+	might_sleep();
+	card_interface_init(priv);
+}
+
+int agnx_hw_reset(struct agnx_priv *priv)
+{
+	return card_full_reset(priv);
+}
+
+int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len)
+{
+	AGNX_TRACE;
+	return 0;
+}
+
+void agnx_set_bssid(struct agnx_priv *priv, u8 *bssid)
+{
+	receiver_bssid_set(priv, bssid);
+}

+ 409 - 0
drivers/staging/agnx/phy.h

@@ -0,0 +1,409 @@
+#ifndef AGNX_PHY_H_
+#define AGNX_PHY_H_
+
+#include "agnx.h"
+
+/* Transmission Management Registers */
+#define AGNX_TXM_BASE		0x0000
+#define AGNX_TXM_CTL		0x0000	/* control register */
+#define AGNX_TXM_ETMF		0x0004 /* enable transmission management functions */
+#define AGNX_TXM_TXTEMP		0x0008 /* transmission template */
+#define AGNX_TXM_RETRYSTAID	0x000c /* Retry Station ID */
+#define AGNX_TXM_TIMESTAMPLO		0x0010	/* Timestamp Lo */
+#define AGNX_TXM_TIMESTAMPHI		0x0014	/* Timestamp Hi */
+#define AGNX_TXM_TXDELAY	0x0018  /* tx delay */
+#define AGNX_TXM_TBTTLO		0x0020	/* tbtt Lo */
+#define AGNX_TXM_TBTTHI		0x0024	/* tbtt Hi */
+#define AGNX_TXM_BEAINTER	0x0028 /* Beacon Interval */
+#define AGNX_TXM_NAV		0x0030 /* NAV */
+#define AGNX_TXM_CFPMDV		0x0034 /* CFP MDV */
+#define AGNX_TXM_CFPERCNT	0x0038 /* CFP period count */
+#define AGNX_TXM_PROBDELAY	0x003c /* probe delay */
+#define AGNX_TXM_LISINTERCNT	0x0040 /* listen interval count */
+#define AGNX_TXM_DTIMPERICNT	0x004c /* DTIM period count */
+
+#define AGNX_TXM_BEACON_CTL	0x005c /* beacon control */
+
+#define AGNX_TXM_SCHEMPCNT	0x007c /* schedule empty count */
+#define AGNX_TXM_MAXTIMOUT	0x0084 /* max timeout exceed count */
+#define AGNX_TXM_MAXCFPTIM	0x0088 /* max CF poll timeout count */
+#define AGNX_TXM_MAXRXTIME	0x008c /* max RX timeout count */
+#define AGNX_TXM_MAXACKTIM	0x0090	/* max ACK timeout count */
+#define AGNX_TXM_DIF01		0x00a0 /* DIF 0-1 */
+#define AGNX_TXM_DIF23		0x00a4 /* DIF 2-3 */
+#define AGNX_TXM_DIF45		0x00a8 /* DIF 4-5 */
+#define AGNX_TXM_DIF67		0x00ac /* DIF 6-7 */
+#define AGNX_TXM_SIFSPIFS	0x00b0 /* SIFS/PIFS */
+#define AGNX_TXM_TIFSEIFS	0x00b4 /* TIFS/EIFS */
+#define AGNX_TXM_MAXCCACNTSLOT	0x00b8 /* max CCA count slot */
+#define AGNX_TXM_SLOTLIMIT	0x00bc /* slot limit/1 msec limit */
+#define AGNX_TXM_CFPOLLRXTIM	0x00f0 /* CF poll RX timeout count */
+#define AGNX_TXM_CFACKT11B	0x00f4 /* CF ack timeout limit for 11b */
+#define AGNX_TXM_CW0		0x0100 /* CW 0 */
+#define AGNX_TXM_SLBEALIM0	0x0108 /* short/long beacon limit 0 */
+#define AGNX_TXM_CW1		0x0120 /* CW 1 */
+#define AGNX_TXM_SLBEALIM1	0x0128 /* short/long beacon limit 1 */
+#define AGNX_TXM_CW2		0x0140 /* CW 2 */
+#define AGNX_TXM_SLBEALIM2	0x0148 /* short/long beacon limit 2 */
+#define AGNX_TXM_CW3		0x0160 /* CW 3 */
+#define AGNX_TXM_SLBEALIM3	0x0168 /* short/long beacon limit 3 */
+#define AGNX_TXM_CW4		0x0180 /* CW 4 */
+#define AGNX_TXM_SLBEALIM4	0x0188 /* short/long beacon limit 4 */
+#define AGNX_TXM_CW5		0x01a0 /* CW 5 */
+#define AGNX_TXM_SLBEALIM5	0x01a8 /* short/long beacon limit 5 */
+#define AGNX_TXM_CW6		0x01c0 /* CW 6 */
+#define AGNX_TXM_SLBEALIM6	0x01c8 /* short/long beacon limit 6 */
+#define AGNX_TXM_CW7		0x01e0 /* CW 7 */
+#define AGNX_TXM_SLBEALIM7	0x01e8 /* short/long beacon limit 7 */
+#define AGNX_TXM_BEACONTEMP     0x1000	/* beacon template */
+#define AGNX_TXM_STAPOWTEMP	0x1a00 /*  Station Power Template */
+
+/* Receive Management Control Registers */
+#define AGNX_RXM_BASE		0x2000
+#define AGNX_RXM_REQRATE	0x2000	/* requested rate */
+#define AGNX_RXM_MACHI		0x2004	/* first 4 bytes of mac address */
+#define AGNX_RXM_MACLO		0x2008	/* last 2 bytes of mac address */
+#define AGNX_RXM_BSSIDHI	0x200c	/* bssid hi */
+#define AGNX_RXM_BSSIDLO	0x2010	/* bssid lo */
+#define AGNX_RXM_HASH_CMD_FLAG	0x2014	/* Flags for the RX Hash Command (default: 0) */
+#define AGNX_RXM_HASH_CMD_HIGH	0x2018	/* The High half of the Hash Command */
+#define AGNX_RXM_HASH_CMD_LOW	0x201c	/* The Low half of the Hash Command */
+#define AGNX_RXM_ROUTAB		0x2020	/* routing table */
+#define		ROUTAB_SUBTYPE_SHIFT	24
+#define		ROUTAB_TYPE_SHIFT	28
+#define		ROUTAB_STATUS_SHIFT	30
+#define		ROUTAB_RW_SHIFT		31
+#define		ROUTAB_ROUTE_DROP	0xf00000 /* Drop */
+#define		ROUTAB_ROUTE_CPU	0x400000 /* CPU */
+#define		ROUTAB_ROUTE_ENCRY	0x500800 /* Encryption */
+#define		ROUTAB_ROUTE_RFP	0x800000 /* RFP */
+
+#define		ROUTAB_TYPE_MANAG	0x0 /* Management */
+#define		ROUTAB_TYPE_CTL		0x1 /* Control */
+#define		ROUTAB_TYPE_DATA	0x2 /* Data */
+
+#define		ROUTAB_SUBTYPE_DATA		0x0
+#define		ROUTAB_SUBTYPE_DATAACK		0x1
+#define		ROUTAB_SUBTYPE_DATAPOLL		0x2
+#define		ROUTAB_SUBTYPE_DATAPOLLACK	0x3
+#define		ROUTAB_SUBTYPE_NULL		0x4 /* NULL */
+#define		ROUTAB_SUBTYPE_NULLACK		0x5
+#define		ROUTAB_SUBTYPE_NULLPOLL		0x6
+#define		ROUTAB_SUBTYPE_NULLPOLLACK	0x7
+#define		ROUTAB_SUBTYPE_QOSDATA		0x8 /* QOS DATA */
+#define		ROUTAB_SUBTYPE_QOSDATAACK	0x9
+#define		ROUTAB_SUBTYPE_QOSDATAPOLL	0xa
+#define		ROUTAB_SUBTYPE_QOSDATAACKPOLL	0xb
+#define		ROUTAB_SUBTYPE_QOSNULL		0xc
+#define		ROUTAB_SUBTYPE_QOSNULLACK	0xd
+#define		ROUTAB_SUBTYPE_QOSNULLPOLL	0xe
+#define		ROUTAB_SUBTYPE_QOSNULLPOLLACK	0xf
+#define AGNX_RXM_DELAY11	   0x2024	/* delay 11(AB) */
+#define AGNX_RXM_SOF_CNT	   0x2028	/* SOF Count */
+#define AGNX_RXM_FRAG_CNT	   0x202c	/* Fragment Count */
+#define AGNX_RXM_FCS_CNT	   0x2030	/* FCS Count */
+#define AGNX_RXM_BSSID_MISS_CNT	   0x2034	/* BSSID Miss Count */
+#define AGNX_RXM_PDU_ERR_CNT	   0x2038	/* PDU Error Count */
+#define AGNX_RXM_DEST_MISS_CNT	   0x203C	/* Destination Miss Count */
+#define AGNX_RXM_DROP_CNT	   0x2040	/* Drop Count */
+#define AGNX_RXM_ABORT_CNT	   0x2044	/* Abort Count */
+#define AGNX_RXM_RELAY_CNT	   0x2048	/* Relay Count */
+#define AGNX_RXM_HASH_MISS_CNT	   0x204c	/* Hash Miss Count */
+#define AGNX_RXM_SA_HI		   0x2050	/* Address of received packet Hi */
+#define AGNX_RXM_SA_LO		   0x2054	/* Address of received packet Lo */
+#define AGNX_RXM_HASH_DUMP_LST	   0x2100	/* Contains Hash Data */
+#define AGNX_RXM_HASH_DUMP_MST	   0x2104	/* Contains Hash Data */
+#define AGNX_RXM_HASH_DUMP_DATA    0x2108	/* The Station ID to dump */
+
+
+/* Encryption Management */
+#define AGNX_ENCRY_BASE		0x2400
+#define AGNX_ENCRY_WEPKEY0	0x2440 /* wep key #0 */
+#define AGNX_ENCRY_WEPKEY1	0x2444 /* wep key #1 */
+#define AGNX_ENCRY_WEPKEY2	0x2448 /* wep key #2 */
+#define AGNX_ENCRY_WEPKEY3	0x244c /* wep key #3 */
+#define AGNX_ENCRY_CCMRECTL	0x2460 /* ccm replay control */
+
+
+/* Band Management Registers */
+#define AGNX_BM_BASE		0x2c00
+#define AGNX_BM_BMCTL		0x2c00  /* band management control */
+#define AGNX_BM_TXWADDR		0x2c18  /* tx workqueue address start */
+#define AGNX_BM_TXTOPEER	0x2c24	/* transmit to peers */
+#define AGNX_BM_FPLHP		0x2c2c  /* free pool list head pointer */
+#define AGNX_BM_FPLTP		0x2c30  /* free pool list tail pointer */
+#define AGNX_BM_FPCNT		0x2c34  /* free pool count */
+#define AGNX_BM_CIPDUWCNT	0x2c38  /* card interface pdu workqueue count */
+#define AGNX_BM_SPPDUWCNT	0x2c3c  /* sp pdu workqueue count */
+#define AGNX_BM_RFPPDUWCNT	0x2c40  /* rfp pdu workqueue count */
+#define AGNX_BM_RHPPDUWCNT	0x2c44  /* rhp pdu workqueue count */
+#define AGNX_BM_CIWQCTL		0x2c48 /* Card Interface WorkQueue Control */
+#define AGNX_BM_CPUTXWCTL	0x2c50  /* cpu tx workqueue control */
+#define AGNX_BM_CPURXWCTL	0x2c58  /* cpu rx workqueue control */
+#define AGNX_BM_CPULWCTL	0x2c60 /* cpu low workqueue control */
+#define AGNX_BM_CPUHWCTL	0x2c68 /* cpu high workqueue control */
+#define AGNX_BM_SPTXWCTL	0x2c70 /* sp tx workqueue control */
+#define AGNX_BM_SPRXWCTL	0x2c78 /* sp rx workqueue control */
+#define AGNX_BM_RFPWCTL		0x2c80 /* RFP workqueue control */
+#define AGNX_BM_MTSM		0x2c90 /* Multicast Transmit Station Mask */
+
+/* Card Interface Registers (32bits) */
+#define AGNX_CIR_BASE		0x3000
+#define AGNX_CIR_BLKCTL		0x3000	/* block control*/
+#define		AGNX_STAT_TX	0x1
+#define		AGNX_STAT_RX	0x2
+#define		AGNX_STAT_X	0x4
+/* The two interrupt flags below are set by our driver, not by the CPU
+   or the card */
+#define		AGNX_STAT_TXD	0x10
+#define		AGNX_STAT_TXM	0x20
+
+#define AGNX_CIR_ADDRWIN	0x3004	/* Addressable Windows*/
+#define AGNX_CIR_ENDIAN		0x3008  /* card endianness */
+#define AGNX_CIR_SERIALITF	0x3020	/* serial interface */
+#define AGNX_CIR_RXCFG		0x3040	/* receive config */
+#define		ENABLE_RX_INTERRUPT 0x20
+#define		RX_CACHE_LINE	    0x8
+/* the RX fragment length */
+#define		FRAG_LEN_256	0x0 /* 256B */
+#define		FRAG_LEN_512	0x1
+#define		FRAG_LEN_1024	0x2
+#define		FRAG_LEN_2048	0x3
+#define		FRAG_BE		0x10
+#define AGNX_CIR_RXCTL		0x3050	/* receive control */
+/* memory address, chipside */
+#define AGNX_CIR_RXCMSTART	0x3054	/* receive client memory start */
+#define AGNX_CIR_RXCMEND	0x3058	/* receive client memory end */
+/* memory address, pci */
+#define AGNX_CIR_RXHOSTADDR	0x3060	/* receive hostside address */
+/* memory address, chipside */
+#define AGNX_CIR_RXCLIADDR	0x3064	/* receive clientside address */
+#define AGNX_CIR_RXDMACTL	0x3068	/* receive dma control */
+#define AGNX_CIR_TXCFG		0x3080	/* transmit config */
+#define AGNX_CIR_TXMCTL		0x3090 /* Transmit Management Control */
+#define		ENABLE_TX_INTERRUPT 0x20
+#define		TX_CACHE_LINE	    0x8
+#define AGNX_CIR_TXMSTART	0x3094 /* Transmit Management Start */
+#define AGNX_CIR_TXMEND		0x3098 /* Transmit Management End */
+#define AGNX_CIR_TXDCTL		0x30a0	/* transmit data control */
+/* memory address, chipside */
+#define AGNX_CIR_TXDSTART	0x30a4	/* transmit data start */
+#define AGNX_CIR_TXDEND		0x30a8	/* transmit data end */
+#define AGNX_CIR_TXMHADDR	0x30b0 /* Transmit Management Hostside Address */
+#define AGNX_CIR_TXMCADDR	0x30b4 /* Transmit Management Clientside Address */
+#define AGNX_CIR_TXDMACTL	0x30b8	/* transmit dma control */
+
+
+/* Power Management Unit */
+#define AGNX_PM_BASE		0x3c00
+#define AGNX_PM_PMCTL		0x3c00	/* PM Control*/
+#define AGNX_PM_MACMSW		0x3c08 /* MAC Manual Slow Work Enable */
+#define AGNX_PM_RFCTL		0x3c0c /* RF Control */
+#define AGNX_PM_PHYMW		0x3c14	/* Phy Manual Work */
+#define AGNX_PM_SOFTRST		0x3c18	/* PMU Soft Reset */
+#define AGNX_PM_PLLCTL		0x3c1c	/* PMU PLL control*/
+#define AGNX_PM_TESTPHY		0x3c24 /* PMU Test Phy */
+
+
+/* Interrupt Control interface */
+#define AGNX_INT_BASE		0x4000
+#define AGNX_INT_STAT		0x4000	/* interrupt status */
+#define AGNX_INT_MASK		0x400c	/* interrupt mask */
+/* FIXME */
+#define		IRQ_TX_BEACON	0x1	/* TX Beacon */
+#define		IRQ_TX_RETRY	0x8	/* TX Retry Interrupt */
+#define		IRQ_TX_ACTIVITY	0x10	/* TX Activity */
+#define		IRQ_RX_ACTIVITY	0x20	/* RX Activity */
+/* FIXME: guess -- raised when we RX a packet for a nonexistent
+   station, or for a station that hasn't been initialized */
+#define		IRQ_RX_X	0x40
+#define		IRQ_RX_Y	0x80	/* RX ? */
+#define		IRQ_RX_HASHHIT	0x100	/* RX Hash Hit */
+#define		IRQ_RX_FRAME	0x200	/* RX Frame */
+#define		IRQ_ERR_INT	0x400	/* Error Interrupt */
+#define		IRQ_TX_QUE_FULL	0x800	/* TX Workqueue Full */
+#define		IRQ_BANDMAN_ERR	0x10000	/* Bandwidth Management Error */
+#define		IRQ_TX_DISABLE	0x20000	/* TX Disable */
+#define		IRQ_RX_IVASESKEY 0x80000 /* RX Invalid Session Key */
+#define		IRQ_RX_KEYIDMIS	0x100000 /* RX key ID Mismatch */
+#define		IRQ_REP_THHIT	0x200000 /* Replay Threshold Hit */
+#define		IRQ_TIMER1	0x4000000 /* Timer1 */
+#define		IRQ_TIMER_CNT	0x10000000 /* Timer Count */
+#define		IRQ_PHY_FASTINT 0x20000000 /* Phy Fast Interrupt */
+#define		IRQ_PHY_SLOWINT	0x40000000 /* Phy Slow Interrupt */
+#define		IRQ_OTHER	0x80000000 /* Unknown interrupt */
+#define		AGNX_IRQ_ALL   	0xffffffff
+
+/* System Interface */
+#define AGNX_SYSITF_BASE	0x4400
+#define AGNX_SYSITF_SYSMODE	0x4400	/* system mode */
+#define AGNX_SYSITF_GPIOIN	0x4410 /* GPIO In */
+/* Pin lines for LEDs? */
+#define AGNX_SYSITF_GPIOUT	0x4414	/* GPIO Out */
+
+/* Timer Control */
+#define AGNX_TIMCTL_TIMER1	0x4800 /* Timer 1 */
+#define AGNX_TIMCTL_TIM1CTL	0x4808 /* Timer 1 Control */
+
+
+/* Antenna Calibration Interface */
+#define AGNX_ACI_BASE		0x5000
+#define AGNX_ACI_MODE		0x5000 /* Mode */
+#define AGNX_ACI_MEASURE	0x5004 /* Measure */
+#define AGNX_ACI_SELCHAIN	0x5008 /* Select Chain */
+#define AGNX_ACI_LEN		0x500c /* Length */
+#define AGNX_ACI_TIMER1		0x5018 /* Timer 1 */
+#define AGNX_ACI_TIMER2		0x501c /* Timer 2 */
+#define AGNX_ACI_OFFSET		0x5020 /* Offset */
+#define AGNX_ACI_STATUS		0x5030 /* Status */
+#define		CALI_IDLE	0x0
+#define		CALI_DONE	0x1
+#define		CALI_BUSY	0x2
+#define		CALI_ERR	0x3
+#define AGNX_ACI_AICCHA0OVE	0x5034 /* AIC Channel 0 Override */
+#define AGNX_ACI_AICCHA1OVE	0x5038 /* AIC Channel 1 Override */
+
+/* Gain Control Registers */
+#define AGNX_GCR_BASE		0x9000
+/* threshold of primary antenna */
+#define AGNX_GCR_THD0A		0x9000	/* threshold? D0 A */
+/* low threshold of primary antenna */
+#define AGNX_GCR_THD0AL		0x9004	/* threshold? D0 A low */
+/* threshold of secondary antenna */
+#define AGNX_GCR_THD0B		0x9008	/* threshold? D0_B */
+#define AGNX_GCR_DUNSAT		0x900c /* d unsaturated */
+#define AGNX_GCR_DSAT		0x9010 /* d saturated */
+#define AGNX_GCR_DFIRCAL	0x9014 /* D Fir/Cal */
+#define AGNX_GCR_DGCTL11A	0x9018 /* d gain control 11a */
+#define AGNX_GCR_DGCTL11B	0x901c /* d gain control 11b */
+/* strength of gain */
+#define AGNX_GCR_GAININIT	0x9020	/* gain initialization */
+#define AGNX_GCR_THNOSIG	0x9024 /* threshold no signal */
+#define AGNX_GCR_COARSTEP	0x9028 /* coarse stepping */
+#define AGNX_GCR_SIFST11A	0x902c /* SIFS time 11a */
+#define AGNX_GCR_SIFST11B	0x9030 /* SIFS time 11b */
+#define AGNX_GCR_CWDETEC	0x9034 /* cw detection */
+#define AGNX_GCR_0X38		0x9038 /* ???? */
+#define AGNX_GCR_BOACT		0x903c	/* BO Active */
+#define AGNX_GCR_BOINACT	0x9040	/* BO Inactive */
+#define AGNX_GCR_BODYNA		0x9044	/* BO dynamic */
+/* 802.11 mode(a,b,g) */
+#define AGNX_GCR_DISCOVMOD	0x9048	/* discovery mode */
+#define AGNX_GCR_NLISTANT	0x904c	/* number of listening antenna */
+#define AGNX_GCR_NACTIANT	0x9050	/* number of active antenna */
+#define AGNX_GCR_NMEASANT	0x9054	/* number of measuring antenna */
+#define AGNX_GCR_NCAPTANT	0x9058	/* number of capture antenna */
+#define AGNX_GCR_THCAP11A	0x905c /* threshold capture 11a */
+#define AGNX_GCR_THCAP11B	0x9060 /* threshold capture 11b */
+#define AGNX_GCR_THCAPRX11A	0x9064 /* threshold capture rx 11a */
+#define AGNX_GCR_THCAPRX11B	0x9068 /* threshold capture rx 11b */
+#define AGNX_GCR_THLEVDRO	0x906c /* threshold level drop */
+#define AGNX_GCR_GAINSET0	0x9070 /* Gainset 0 */
+#define AGNX_GCR_GAINSET1	0x9074 /* Gainset 1 */
+#define AGNX_GCR_GAINSET2	0x9078 /* Gainset 2 */
+#define AGNX_GCR_MAXRXTIME11A	0x907c /* maximum rx time 11a */
+#define AGNX_GCR_MAXRXTIME11B	0x9080 /* maximum rx time 11b */
+#define AGNX_GCR_CORRTIME	0x9084 /* correction time */
+/* reset the subsystem, 0 = disable, 1 = enable */
+#define AGNX_GCR_RSTGCTL	0x9088	/* reset gain control */
+/* channel receiving */
+#define AGNX_GCR_RXCHANEL	0x908c	/* receive channel */
+#define AGNX_GCR_NOISE0		0x9090 /* Noise 0 */
+#define AGNX_GCR_NOISE1		0x9094 /* Noise 1 */
+#define AGNX_GCR_NOISE2		0x9098 /* Noise 2 */
+#define AGNX_GCR_SIGHTH		0x909c	/* Signal High Threshold */
+#define AGNX_GCR_SIGLTH		0x90a0	/* Signal Low Threshold */
+#define AGNX_GCR_CORRDROP	0x90a4 /* correction drop */
+/* threshold of tertiay antenna */
+#define AGNX_GCR_THCD		0x90a8	/* threshold? CD */
+#define AGNX_GCR_THCS		0x90ac	/* threshold? CS */
+#define AGNX_GCR_MAXPOWDIFF	0x90b8 /* maximum power difference */
+#define AGNX_GCR_TRACNT4	0x90ec /* Transition Count 4 */
+#define AGNX_GCR_TRACNT5      	0x90f0	/* transition count 5 */
+#define AGNX_GCR_TRACNT6       	0x90f4	/* transition count 6 */
+#define AGNX_GCR_TRACNT7       	0x90f8	/* transition count 7 */
+#define AGNX_GCR_TESTBUS	0x911c /* test bus */
+#define AGNX_GCR_CHAINNUM	0x9120 /* Number of Chains */
+#define AGNX_GCR_ANTCFG		0x9124	/* Antenna Config */
+#define AGNX_GCR_THJUMP		0x912c /* threshold jump */
+#define AGNX_GCR_THPOWER	0x9130 /* threshold power */
+#define AGNX_GCR_THPOWCLIP	0x9134 /* threshold power clip*/
+#define AGNX_GCR_FORCECTLCLK	0x9138 /* Force Gain Control Clock */
+#define AGNX_GCR_GAINSETWRITE	0x913c /* Gainset Write */
+#define AGNX_GCR_THD0BTFEST	0x9140	/* threshold d0 b tf estimate */
+#define AGNX_GCR_THRX11BPOWMIN	0x9144	/* threshold rx 11b power minimum */
+#define AGNX_GCR_0X14c		0x914c /* ?? */
+#define AGNX_GCR_0X150		0x9150 /* ?? */
+#define AGNX_GCR_RXOVERIDE	0x9194	/* receive override */
+#define AGNX_GCR_WATCHDOG	0x91b0	/* watchdog timeout */
+
+
+/* Spi Interface */
+#define AGNX_SPI_BASE		0xdc00
+#define AGNX_SPI_CFG		0xdc00 /* spi configuration */
+/* Only accept 16 bits */
+#define AGNX_SPI_WMSW		0xdc04	/* write most significant word */
+/* Only accept 16 bits */
+#define AGNX_SPI_WLSW		0xdc08	/* write least significant word */
+#define AGNX_SPI_CTL		0xdc0c	/* spi control */
+#define AGNX_SPI_RMSW		0xdc10 /* read most significant word */
+#define AGNX_SPI_RLSW		0xdc14 /* read least significant word */
+/* SPI Control Mask */
+#define		SPI_READ_CTL		0x4000 /* read control */
+#define		SPI_BUSY_CTL		0x8000 /* busy control */
+/* RF and synth chips in spi */
+#define		RF_CHIP0	0x400
+#define		RF_CHIP1	0x800
+#define		RF_CHIP2	0x1000
+#define		SYNTH_CHIP	0x2000
+
+/* Unknown register */
+#define AGNX_UNKNOWN_BASE	0x7800
+
+/* FIXME MonitorGain */
+#define AGNX_MONGCR_BASE	0x12000
+
+/* Gain Table */
+#define AGNX_GAIN_TABLE		0x12400
+
+/* The initial FIR coefficient table */
+#define AGNX_FIR_BASE		0x19804
+
+#define AGNX_ENGINE_LOOKUP_TBL	0x800
+
+/* eeprom commands */
+#define EEPROM_CMD_NULL		0x0 /* NULL */
+#define EEPROM_CMD_WRITE	0x2 /* write */
+#define EEPROM_CMD_READ		0x3 /* read */
+#define EEPROM_CMD_STATUSREAD	0x5 /* status register read */
+#define EEPROM_CMD_WRITEENABLE	0x6 /* write enable */
+#define EEPROM_CMD_CONFIGURE	0x7 /* configure */
+
+#define EEPROM_DATAFORCOFIGURE	0x6 /* ??? */
+
+/* eeprom address */
+#define EEPROM_ADDR_SUBVID	0x0 /* Sub Vendor ID */
+#define EEPROM_ADDR_SUBSID	0x2 /* Sub System ID */
+#define EEPROM_ADDR_MACADDR	0x146 /* MAC Address */
+#define EEPROM_ADDR_LOTYPE	0x14f /* LO type */
+
+struct agnx_eeprom {
+	u8 data;	/* data */
+	u16 address;	/* address in EEPROM */
+	u8 cmd;		/* command, unknown, status */
+}  __attribute__((__packed__));
+
+#define AGNX_EEPROM_COMMAND_SHIFT	5
+#define AGNX_EEPROM_COMMAND_STAT	0x01
+
+void disable_receiver(struct agnx_priv *priv);
+void enable_receiver(struct agnx_priv *priv);
+u8 read_from_eeprom(struct agnx_priv *priv, u16 address);
+void agnx_hw_init(struct agnx_priv *priv);
+int agnx_hw_reset(struct agnx_priv *priv);
+int agnx_set_ssid(struct agnx_priv *priv, u8 *ssid, size_t ssid_len);
+void agnx_set_bssid(struct agnx_priv *priv, u8 *bssid);
+void enable_power_saving(struct agnx_priv *priv);
+void disable_power_saving(struct agnx_priv *priv);
+void calibrate_antenna_period(unsigned long data);
+
+#endif /* AGNX_PHY_H_ */

+ 894 - 0
drivers/staging/agnx/rf.c

@@ -0,0 +1,894 @@
+/*
+ * Airgo MIMO wireless driver
+ *
+ * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
+ *
+ * Thanks to Jeff Williams <angelbane@gmail.com> for doing the reverse
+ * engineering work and publishing the SPECS at
+ * http://airgo.wdwconsulting.net/mymoin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "agnx.h"
+#include "debug.h"
+#include "phy.h"
+#include "table.h"
+
+/* FIXME! */
+static inline void spi_write(void __iomem *region, u32 chip_ids, u32 sw,
+		      u16 size, u32 control)
+{
+	u32 reg;
+	u32 lsw = sw & 0xffff;		/* lower 16 bits of sw*/
+	u32 msw = sw >> 16;		/* high 16 bits of sw */
+
+	/* FIXME Write Most Significant Word of the 32bit data to MSW */
+	/* FIXME And Least Significant Word to LSW */
+	iowrite32((lsw), region + AGNX_SPI_WLSW);
+	iowrite32((msw), region + AGNX_SPI_WMSW);
+	reg = chip_ids | size | control;
+	/* Write chip id(s), write size and busy control to Control Register */
+	iowrite32((reg), region + AGNX_SPI_CTL);
+	/* Wait for Busy control to clear */
+	spi_delay();
+}
+
+/*
+ * Write to SPI Synth register
+ */
+static inline void spi_sy_write(void __iomem *region, u32 chip_ids, u32 sw)
+{
+	/* FIXME the size 0x15 is a magic value*/
+	spi_write(region, chip_ids, sw, 0x15, SPI_BUSY_CTL);
+}
+
+/*
+ * Write to SPI RF register
+ */
+static inline void spi_rf_write(void __iomem *region, u32 chip_ids, u32 sw)
+{
+	/* FIXME the size 0xd is a magic value*/
+	spi_write(region, chip_ids, sw, 0xd, SPI_BUSY_CTL);
+} /* spi_rf_write */
+
+/*
+ * Write to SPI with Read Control bit set
+ */
+inline void spi_rc_write(void __iomem *region, u32 chip_ids, u32 sw)
+{
+	/* FIXME the size 0xe5 is a magic value */
+	spi_write(region, chip_ids, sw, 0xe5, SPI_BUSY_CTL|SPI_READ_CTL);
+}
+
+/* Get the number of active chains */
+static int get_active_chains(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int num = 0;
+	u32 reg;
+	AGNX_TRACE;
+
+	spi_rc_write(ctl, RF_CHIP0, 0x21);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (reg == 1)
+		num++;
+
+	spi_rc_write(ctl, RF_CHIP1, 0x21);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (reg == 1)
+		num++;
+
+	spi_rc_write(ctl, RF_CHIP2, 0x21);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (reg == 1)
+		num++;
+
+	spi_rc_write(ctl, RF_CHIP0, 0x26);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (0x33 != reg)
+		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
+
+	return num;
+} /* get_active_chains */
+
+void rf_chips_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	int num;
+	AGNX_TRACE;
+
+	if (priv->revid == 1) {
+		reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
+		reg |= 0x8;
+		agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);
+	}
+
+	/* Set SPI clock speed to 200NS */
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xF;
+	reg |= 0x3;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+
+	/* Set SPI clock speed to 50NS */
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xF;
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1101);
+
+	num = get_active_chains(priv);
+	printk(KERN_INFO PFX "Active chains are %d\n", num);
+
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xF;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1908);
+} /* rf_chips_init */
+
+
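+/* Per-channel synth chip settings: column 0 is the channel number;
+   column N holds the value for synth register N (written by
+   channel_tbl_write() below, which ORs the register number into the
+   low nibble) */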
+static u32 channel_tbl[15][9] = {
+	{0,  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+	{1,  0x00, 0x00, 0x624, 0x00, 0x1a4, 0x28, 0x00, 0x1e},
+	{2,  0x00, 0x00, 0x615, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
+	{3,  0x00, 0x00, 0x61a, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
+	{4,  0x00, 0x00, 0x61f, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
+	{5,  0x00, 0x00, 0x624, 0x00, 0x1ae, 0x28, 0x00, 0x1e},
+	{6,  0x00, 0x00, 0x61f, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
+	{7,  0x00, 0x00, 0x624, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
+	{8,  0x00, 0x00, 0x629, 0x00, 0x1b3, 0x28, 0x00, 0x1e},
+	{9,  0x00, 0x00, 0x624, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+	{10, 0x00, 0x00, 0x629, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+	{11, 0x00, 0x00, 0x62e, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+	{12, 0x00, 0x00, 0x633, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+	{13, 0x00, 0x00, 0x628, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+	{14, 0x00, 0x00, 0x644, 0x00, 0x1b8, 0x28, 0x00, 0x1e},
+};
+
+
+static inline void
+channel_tbl_write(struct agnx_priv *priv, unsigned int channel, unsigned int reg_num)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	reg = channel_tbl[channel][reg_num];
+	reg <<= 4;
+	reg |= reg_num;
+	spi_sy_write(ctl, SYNTH_CHIP, reg);
+}
+
+static void synth_freq_set(struct agnx_priv *priv, unsigned int channel)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+
+	/* Set the Clock bits to 50NS */
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xF;
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+
+	/* Write 0x00c0 to LSW and 0x3 to MSW of Synth Chip */
+	spi_sy_write(ctl, SYNTH_CHIP, 0x300c0);
+
+	spi_sy_write(ctl, SYNTH_CHIP, 0x32);
+
+	/* # Write to Register 1 on the Synth Chip */
+	channel_tbl_write(priv, channel, 1);
+	/* # Write to Register 3 on the Synth Chip */
+	channel_tbl_write(priv, channel, 3);
+	/* # Write to Register 6 on the Synth Chip */
+	channel_tbl_write(priv, channel, 6);
+	/* # Write to Register 5 on the Synth Chip */
+	channel_tbl_write(priv, channel, 5);
+	/* # Write to register 8 on the Synth Chip */
+	channel_tbl_write(priv, channel, 8);
+
+	/* FIXME Clear the clock bits */
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xf;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+} /* synth_freq_set */
+
+
+static void antenna_init(struct agnx_priv *priv, int num_antenna)
+{
+	void __iomem *ctl = priv->ctl;
+
+	switch (num_antenna) {
+	case 1:
+		agnx_write32(ctl, AGNX_GCR_NLISTANT, 1);
+		agnx_write32(ctl, AGNX_GCR_NMEASANT, 1);
+		agnx_write32(ctl, AGNX_GCR_NACTIANT, 1);
+		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 1);
+
+		agnx_write32(ctl, AGNX_GCR_ANTCFG, 7);
+		agnx_write32(ctl, AGNX_GCR_BOACT, 34);
+		agnx_write32(ctl, AGNX_GCR_BOINACT, 34);
+		agnx_write32(ctl, AGNX_GCR_BODYNA, 30);
+
+		agnx_write32(ctl, AGNX_GCR_THD0A, 125);
+		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
+		agnx_write32(ctl, AGNX_GCR_THD0B, 90);
+
+		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 80);
+		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
+		agnx_write32(ctl, AGNX_GCR_SIGLTH, 16);
+		break;
+	case 2:
+		agnx_write32(ctl, AGNX_GCR_NLISTANT, 2);
+		agnx_write32(ctl, AGNX_GCR_NMEASANT, 2);
+		agnx_write32(ctl, AGNX_GCR_NACTIANT, 2);
+		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 2);
+		agnx_write32(ctl, AGNX_GCR_ANTCFG, 15);
+		agnx_write32(ctl, AGNX_GCR_BOACT, 36);
+		agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
+		agnx_write32(ctl, AGNX_GCR_BODYNA, 32);
+		agnx_write32(ctl, AGNX_GCR_THD0A, 120);
+		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
+		agnx_write32(ctl, AGNX_GCR_THD0B, 80);
+		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);
+		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
+		agnx_write32(ctl, AGNX_GCR_SIGLTH, 32);
+		break;
+	case 3:
+		agnx_write32(ctl, AGNX_GCR_NLISTANT, 3);
+		agnx_write32(ctl, AGNX_GCR_NMEASANT, 3);
+		agnx_write32(ctl, AGNX_GCR_NACTIANT, 3);
+		agnx_write32(ctl, AGNX_GCR_NCAPTANT, 3);
+		agnx_write32(ctl, AGNX_GCR_ANTCFG, 31);
+		agnx_write32(ctl, AGNX_GCR_BOACT, 36);
+		agnx_write32(ctl, AGNX_GCR_BOINACT, 36);
+		agnx_write32(ctl, AGNX_GCR_BODYNA, 32);
+		agnx_write32(ctl, AGNX_GCR_THD0A, 100);
+		agnx_write32(ctl, AGNX_GCR_THD0AL, 100);
+		agnx_write32(ctl, AGNX_GCR_THD0B, 70);
+		agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 70);
+		agnx_write32(ctl, AGNX_GCR_SIGHTH, 100);
+		agnx_write32(ctl, AGNX_GCR_SIGLTH, 48);
+//		agnx_write32(ctl, AGNX_GCR_SIGLTH, 16);
+		break;
+	default:
+		printk(KERN_WARNING PFX "Unknow antenna number\n");
+	}
+} /* antenna_init */
+
+static void chain_update(struct agnx_priv *priv, u32 chain)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	spi_rc_write(ctl, RF_CHIP0, 0x20);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
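+	/* reg now holds RF chip 0 register 0x20 -- apparently the
+	   current chain configuration */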
+
+	if (reg == 0x4)
+		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
+	else if (reg != 0x0)
+		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
+	else {
+		if (chain == 3 || chain == 6) {
+			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, reg|0x1000);
+			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+		} else if (chain == 2 || chain == 4) {
+			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, reg|0x1000);
+			spi_rf_write(ctl, RF_CHIP2, 0x1005);
+			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x824);
+		} else if (chain == 1) {
+			spi_rf_write(ctl, RF_CHIP0, reg|0x1000);
+			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1004);
+			agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xc36);
+		}
+	}
+
+	spi_rc_write(ctl, RF_CHIP0, 0x22);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+
+	switch (reg) {
+	case 0:
+		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);
+		break;
+	case 1:
+		spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+		break;
+	case 2:
+		if (chain == 6 || chain == 4) {
+			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1202);
+			spi_rf_write(ctl, RF_CHIP2, 0x1005);
+		} else if (chain < 3) {
+			spi_rf_write(ctl, RF_CHIP0, 0x1202);
+			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1005);
+		}
+		break;
+	default:
+		if (chain == 3) {
+			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
+			spi_rf_write(ctl, RF_CHIP2, 0x1201);
+		} else if (chain == 2) {
+			spi_rf_write(ctl, RF_CHIP0, 0x1203);
+			spi_rf_write(ctl, RF_CHIP2, 0x1200);
+			spi_rf_write(ctl, RF_CHIP1, 0x1201);
+		} else if (chain == 1) {
+			spi_rf_write(ctl, RF_CHIP0, 0x1203);
+			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1200);
+		} else if (chain == 4) {
+			spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
+			spi_rf_write(ctl, RF_CHIP2, 0x1201);
+		} else {
+			spi_rf_write(ctl, RF_CHIP0, 0x1203);
+			spi_rf_write(ctl, RF_CHIP1|RF_CHIP2, 0x1201);
+		}
+	}
+} /* chain_update */
+
+static void antenna_config(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	/* Write 0x0 to the TX Management Control Register Enable bit */
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+	reg &= ~0x1;
+	agnx_write32(ctl, AGNX_TXM_CTL, reg);
+
+	/* FIXME */
+	/* Set initial value based on number of Antennae */
+	antenna_init(priv, 3);
+
+	/* FIXME Update Power Templates for current valid Stations */
+	/* sta_power_init(priv, 0);*/
+
+	/* FIXME: the number of chains should be read from the EEPROM */
+	chain_update(priv, AGNX_CHAINS_MAX);
+} /* antenna_config */
+
+void calibrate_oscillator(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
+	reg |= 0x10;
+	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
+
+	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 1);
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 1);
+
+	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
+
+	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
+	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+	/* (Residual DC Calibration) to Calibration Mode */
+	agnx_write32(ctl, AGNX_ACI_MODE, 0x2);
+
+	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x1004);
+	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
+	/* (TX LO Calibration) to Calibration Mode */
+	agnx_write32(ctl, AGNX_ACI_MODE, 0x4);
+
+	do {
+		u32  reg1, reg2, reg3;
+		/* Enable Power Saving Control */
+		enable_power_saving(priv);
+		/* Save the following registers to restore */
+		reg1 = ioread32(ctl + 0x11000);
+		reg2 = ioread32(ctl + 0xec50);
+		reg3 = ioread32(ctl + 0xec54);
+		wmb();
+
+		agnx_write32(ctl, 0x11000, 0xcfdf);
+		agnx_write32(ctl, 0xec50, 0x70);
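+		/* Note: 0xec54 is saved and restored but never written in
+		   between; its restore below is a no-op unless the
+		   hardware changed it */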
+		/* Restore the registers */
+		agnx_write32(ctl, 0x11000, reg1);
+		agnx_write32(ctl, 0xec50, reg2);
+		agnx_write32(ctl, 0xec54, reg3);
+		/* Disable Power Saving Control */
+		disable_power_saving(priv);
+	} while (0);
+
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0);
+} /* calibrate_oscillator */
+
+
+static void radio_channel_set(struct agnx_priv *priv, unsigned int channel)
+{
+	void __iomem *ctl = priv->ctl;
+	unsigned int freq = priv->band.channels[channel - 1].center_freq;
+	u32 reg;
+	AGNX_TRACE;
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+	/* Set SPI Clock to 50 Ns */
+	reg = agnx_read32(ctl, AGNX_SPI_CFG);
+	reg &= ~0xF;
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_SPI_CFG, reg);
+
+	/* Clear the Disable Tx interrupt bit in Interrupt Mask */
+/* 	reg = agnx_read32(ctl, AGNX_INT_MASK); */
+/* 	reg &= ~IRQ_TX_DISABLE; */
+/* 	agnx_write32(ctl, AGNX_INT_MASK, reg); */
+
+	/* Band Selection */
+	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);
+
+	/* FIXME Set the SiLabs Chip Frequency */
+	synth_freq_set(priv, channel);
+
+	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
+	reg |= 0x80100030;
+	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
+	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
+	reg |= 0x20009;
+	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);
+
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, 0x5);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1100);
+
+	/* Load the MonitorGain Table */
+	monitor_gain_table_init(priv);
+
+	/* Load the TX Fir table */
+	tx_fir_table_init(priv);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg |= 0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+
+	spi_rc_write(ctl, RF_CHIP0|RF_CHIP1, 0x22);
+	udelay(80);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+
+
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff);
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+
+	reg = agnx_read32(ctl, 0xec50);
+	reg |= 0x4f;
+	agnx_write32(ctl, 0xec50, reg);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+	agnx_write32(ctl, 0x11008, 0x1);
+	agnx_write32(ctl, 0x1100c, 0x0);
+	agnx_write32(ctl, 0x11008, 0x0);
+	agnx_write32(ctl, 0xec50, 0xc);
+
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+	agnx_write32(ctl, 0x11010, 0x6e);
+	agnx_write32(ctl, 0x11014, 0x6c);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1201);
+
+	/* Calibrate the Antenna */
+	/* antenna_calibrate(priv); */
+	/* Calibrate the TxLocalOscillator */
+	calibrate_oscillator(priv);
+
+	reg = agnx_read32(ctl, AGNX_PM_PMCTL);
+	reg &= ~0x8;
+	agnx_write32(ctl, AGNX_PM_PMCTL, reg);
+	agnx_write32(ctl, AGNX_GCR_GAININIT, 0xa);
+	agnx_write32(ctl, AGNX_GCR_THCD, 0x0);
+
+	agnx_write32(ctl, 0x11018, 0xb);
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x0);
+
+	/* Write Frequency to Gain Control Channel */
+	agnx_write32(ctl, AGNX_GCR_RXCHANEL, freq);
+	/* Write 0x140000/Freq to 0x9c08 */
+	reg = 0x140000/freq;
+	agnx_write32(ctl, 0x9c08, reg);
+
+	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
+	reg &= ~0x80100030;
+	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
+
+	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
+	reg &= ~0x20009;
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);
+
+	agnx_write32(ctl, AGNX_ACI_MODE, 0x0);
+
+/* FIXME According to Number of Chains: */
+
+/* 			   1. 1: */
+/*          1. Write 0x1203 to RF Chip 0 */
+/*          2. Write 0x1200 to RF Chips 1 +2  */
+/* 			   2. 2: */
+/*          1. Write 0x1203 to RF Chip 0 */
+/*          2. Write 0x1200 to RF Chip 2 */
+/*          3. Write 0x1201 to RF Chip 1  */
+/* 			   3. 3: */
+/*          1. Write 0x1203 to RF Chip 0 */
+/*          2. Write 0x1201 to RF Chip 1 + 2  */
+/* 			   4. 4: */
+/*          1. Write 0x1203 to RF Chip 0 + 1 */
+/*          2. Write 0x1200 to RF Chip 2  */
+
+/* 			   5. 6: */
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1, 0x1203);
+	spi_rf_write(ctl, RF_CHIP2, 0x1201);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+
+	/* FIXME Set the Disable Tx interrupt bit in Interrupt Mask
+	   (Or 0x20000 to Interrupt Mask) */
+/* 	reg = agnx_read32(ctl, AGNX_INT_MASK); */
+/* 	reg |= IRQ_TX_DISABLE; */
+/* 	agnx_write32(ctl, AGNX_INT_MASK, reg); */
+
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
+
+	/* Configure the Antenna */
+	antenna_config(priv);
+
+	/* Write 0x0 to Discovery Mode Enable detect G, B, A packet? */
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0);
+
+	reg = agnx_read32(ctl, AGNX_RXM_REQRATE);
+	reg |= 0x80000000;
+	agnx_write32(ctl, AGNX_RXM_REQRATE, reg);
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
+
+	/* enable radio on and the power LED */
+	reg = agnx_read32(ctl, AGNX_SYSITF_GPIOUT);
+	reg &= ~0x1;
+	reg |= 0x2;
+	agnx_write32(ctl, AGNX_SYSITF_GPIOUT, reg);
+
+	reg = agnx_read32(ctl, AGNX_TXM_CTL);
+	reg |= 0x1;
+	agnx_write32(ctl, AGNX_TXM_CTL, reg);
+} /* radio_channel_set */
+
+static void base_band_filter_calibrate(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1001);
+	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x0);
+	spi_rc_write(ctl, RF_CHIP0, 0x27);
+	spi_rc_write(ctl, RF_CHIP1, 0x27);
+	spi_rc_write(ctl, RF_CHIP2, 0x27);
+	agnx_write32(ctl, AGNX_GCR_FORCECTLCLK, 0x1);
+}
+
+static void print_offset(struct agnx_priv *priv, u32 chain)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 offset;
+
+	iowrite32((chain), ctl + AGNX_ACI_SELCHAIN);
+	udelay(10);
+	offset = (ioread32(ctl + AGNX_ACI_OFFSET));
+	printk(PFX "Chain is 0x%x, Offset is 0x%x\n", chain, offset);
+}
+
+void print_offsets(struct agnx_priv *priv)
+{
+	print_offset(priv, 0);
+	print_offset(priv, 4);
+	print_offset(priv, 1);
+	print_offset(priv, 5);
+	print_offset(priv, 2);
+	print_offset(priv, 6);
+}
+
+
+struct chains {
+	u32 cali;		/* calibration value */
+
+#define  NEED_CALIBRATE		0
+#define  SUCCESS_CALIBRATE	1
+	int status;
+};
+
+static void chain_calibrate(struct agnx_priv *priv, struct chains *chains,
+			    unsigned int num)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 calibra = chains[num].cali;
+
+	if (num < 3)
+		calibra |= 0x1400;
+	else
+		calibra |= 0x1500;
+
+	switch (num) {
+	case 0:
+	case 4:
+		spi_rf_write(ctl, RF_CHIP0, calibra);
+		break;
+	case 1:
+	case 5:
+		spi_rf_write(ctl, RF_CHIP1, calibra);
+		break;
+	case 2:
+	case 6:
+		spi_rf_write(ctl, RF_CHIP2, calibra);
+		break;
+	default:
+		BUG();
+	}
+} /* chain_calibrate */
+
+
+static inline void get_calibrate_value(struct agnx_priv *priv, struct chains *chains,
+				       unsigned int num)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 offset;
+
+	iowrite32((num), ctl + AGNX_ACI_SELCHAIN);
+	/* FIXME */
+	udelay(10);
+	offset = (ioread32(ctl + AGNX_ACI_OFFSET));
+
+	if (offset < 0xf) {
+		chains[num].status = SUCCESS_CALIBRATE;
+		return;
+	}
+
+	if (num == 0 || num == 1 || num == 2) {
+		if (chains[num].cali == 0)
+			chains[num].cali = 0xff;
+		else
+			chains[num].cali--;
+	} else
+		chains[num].cali++;
+
+	chains[num].status = NEED_CALIBRATE;
+}
+
+static inline void calibra_delay(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	int i = 100;
+
+	wmb();
+	while (i--) {
+		reg = (ioread32(ctl + AGNX_ACI_STATUS));
+		if (reg == 0x4000)
+			break;
+		udelay(10);
+	}
+	/* i is -1 only if the poll loop above timed out */
+	if (i < 0)
+		printk(PFX "calibration failed\n");
+}
+
+void do_calibration(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	struct chains chains[7];
+	unsigned int i, j;
+	AGNX_TRACE;
+
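+	/* Chains 0-2 and 4-6 are calibrated; index 3 is never used */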
+	for (i = 0; i < 7; i++) {
+		if (i == 3)
+			continue;
+
+		chains[i].cali = 0x7f;
+		chains[i].status = NEED_CALIBRATE;
+	}
+
+	/* FIXME 0x300 is a magic number */
+	for (j = 0; j < 0x300; j++) {
+		if (chains[0].status == SUCCESS_CALIBRATE &&
+		    chains[1].status == SUCCESS_CALIBRATE &&
+		    chains[2].status == SUCCESS_CALIBRATE &&
+		    chains[4].status == SUCCESS_CALIBRATE &&
+		    chains[5].status == SUCCESS_CALIBRATE &&
+		    chains[6].status == SUCCESS_CALIBRATE)
+			break;
+
+		/* Attention, there is no chain 3 */
+		for (i = 0; i < 7; i++) {
+			if (i == 3)
+				continue;
+			if (chains[i].status == NEED_CALIBRATE)
+				chain_calibrate(priv, chains, i);
+		}
+		/* Write 0x1 to Calibration Measure */
+		iowrite32((0x1), ctl + AGNX_ACI_MEASURE);
+		calibra_delay(priv);
+
+		for (i = 0; i < 7; i++) {
+			if (i == 3)
+				continue;
+
+			get_calibrate_value(priv, chains, i);
+		}
+	}
+	printk(PFX "Clibrate times is %d\n", j);
+	print_offsets(priv);
+} /* do_calibration */
+
+void antenna_calibrate(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+	AGNX_TRACE;
+
+	agnx_write32(ctl, AGNX_GCR_NLISTANT, 0x3);
+	agnx_write32(ctl, AGNX_GCR_NMEASANT, 0x3);
+	agnx_write32(ctl, AGNX_GCR_NACTIANT, 0x3);
+	agnx_write32(ctl, AGNX_GCR_NCAPTANT, 0x3);
+
+	agnx_write32(ctl, AGNX_GCR_ANTCFG, 0x1f);
+	agnx_write32(ctl, AGNX_GCR_BOACT, 0x24);
+	agnx_write32(ctl, AGNX_GCR_BOINACT, 0x24);
+	agnx_write32(ctl, AGNX_GCR_BODYNA, 0x20);
+	agnx_write32(ctl, AGNX_GCR_THD0A, 0x64);
+	agnx_write32(ctl, AGNX_GCR_THD0AL, 0x64);
+	agnx_write32(ctl, AGNX_GCR_THD0B, 0x46);
+	agnx_write32(ctl, AGNX_GCR_THD0BTFEST, 0x3c);
+	agnx_write32(ctl, AGNX_GCR_SIGHTH, 0x64);
+	agnx_write32(ctl, AGNX_GCR_SIGLTH, 0x30);
+
+	spi_rc_write(ctl, RF_CHIP0, 0x20);
+	/* Fixme */
+	udelay(80);
+	/*    1. Should read 0x0  */
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (0x0 != reg)
+		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1000);
+
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+
+	spi_rc_write(ctl, RF_CHIP0, 0x22);
+	udelay(80);
+	reg = agnx_read32(ctl, AGNX_SPI_RLSW);
+	if (0x0 != reg)
+		printk(KERN_WARNING PFX "Unmatched rf chips result\n");
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1005);
+
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x1);
+	agnx_write32(ctl, AGNX_GCR_RSTGCTL, 0x0);
+
+	reg = agnx_read32(ctl, AGNX_PM_SOFTRST);
+	reg |= 0x1c000032;
+	agnx_write32(ctl, AGNX_PM_SOFTRST, reg);
+	reg = agnx_read32(ctl, AGNX_PM_PLLCTL);
+	reg |= 0x0003f07;
+	agnx_write32(ctl, AGNX_PM_PLLCTL, reg);
+
+	reg = agnx_read32(ctl, 0xec50);
+	reg |= 0x40;
+	agnx_write32(ctl, 0xec50, reg);
+
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0xff8);
+	agnx_write32(ctl, AGNX_GCR_DISCOVMOD, 0x3);
+
+	agnx_write32(ctl, AGNX_GCR_CHAINNUM, 0x6);
+	agnx_write32(ctl, 0x19874, 0x0);
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1700);
+
+	/* Calibrate the BaseBandFilter */
+	base_band_filter_calibrate(priv);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);
+
+	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
+	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
+	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);
+	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);
+
+	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
+	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
+
+	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
+	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);
+
+	/* Measure Calibration */
+	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
+	calibra_delay(priv);
+
+	/* do calibration */
+	do_calibration(priv);
+
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
+	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
+
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);
+
+	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);
+	disable_receiver(priv);
+} /* antenna_calibrate */
+
+void __antenna_calibrate(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	/* Calibrate the BaseBandFilter */
+	/* base_band_filter_calibrate(priv); */
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1002);
+
+
+	agnx_write32(ctl, AGNX_GCR_GAINSET0, 0x1d);
+	agnx_write32(ctl, AGNX_GCR_GAINSET1, 0x1d);
+	agnx_write32(ctl, AGNX_GCR_GAINSET2, 0x1d);
+
+	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x1);
+
+	agnx_write32(ctl, AGNX_ACI_MODE, 0x1);
+	agnx_write32(ctl, AGNX_ACI_LEN, 0x3ff);
+
+
+	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x27);
+	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1400);
+	spi_rf_write(ctl, RF_CHIP0|RF_CHIP1|RF_CHIP2, 0x1500);
+	/* Measure Calibration */
+	agnx_write32(ctl, AGNX_ACI_MEASURE, 0x1);
+	calibra_delay(priv);
+	do_calibration(priv);
+	agnx_write32(ctl, AGNX_GCR_RXOVERIDE, 0x0);
+
+	agnx_write32(ctl, AGNX_ACI_TIMER1, 0x21);
+	agnx_write32(ctl, AGNX_ACI_TIMER2, 0x27);
+
+	agnx_write32(ctl, AGNX_ACI_LEN, 0xf);
+
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET0);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET0, reg);
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET1);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET1, reg);
+	reg = agnx_read32(ctl, AGNX_GCR_GAINSET2);
+	reg &= 0xf;
+	agnx_write32(ctl, AGNX_GCR_GAINSET2, reg);
+
+
+	agnx_write32(ctl, AGNX_GCR_GAINSETWRITE, 0x0);
+
+	/* Write 0x3 Gain Control Discovery Mode */
+	enable_receiver(priv);
+}
+
+int agnx_set_channel(struct agnx_priv *priv, unsigned int channel)
+{
+	AGNX_TRACE;
+
+	printk(KERN_ERR PFX "Channel is %d %s\n", channel, __func__);
+	radio_channel_set(priv, channel);
+	return 0;
+}
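
For reference, calibra_delay() above implements a bounded busy-poll: it reads AGNX_ACI_STATUS up to 100 times, 10 us apart, until the done value 0x4000 appears. A minimal user-space sketch of the same pattern; read_status() is a hypothetical stub for the MMIO read, wired to report "ready" on the fifth poll:

#include <stdio.h>

#define CALI_DONE 0x4000
#define MAX_POLLS 100

static unsigned int read_status(void)
{
	static int polls;
	/* stub: pretend the hardware finishes on the fifth read */
	return ++polls >= 5 ? CALI_DONE : 0;
}

static int wait_for_calibration(void)
{
	unsigned int reg = 0;
	int i;

	for (i = 0; i < MAX_POLLS; i++) {
		reg = read_status();
		if (reg == CALI_DONE)
			break;
		/* the real driver does udelay(10) here */
	}
	return reg == CALI_DONE ? 0 : -1;
}

int main(void)
{
	printf("calibration %s\n", wait_for_calibration() ? "failed" : "done");
	return 0;
}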

+ 219 - 0
drivers/staging/agnx/sta.c

@@ -0,0 +1,219 @@
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include "phy.h"
+#include "sta.h"
+#include "debug.h"
+
+void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
+{
+	void __iomem *ctl = priv->ctl;
+
+	reglo &= 0xFFFF;
+	reglo |= 0x30000000;
+	reglo |= 0x40000000;	/* Set status busy */
+	reglo |= sta_id << 16;
+
+	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
+	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
+	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
+
+	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
+	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
+	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);
+}
+
+void hash_write(struct agnx_priv *priv, u8 *mac_addr, u8 sta_id)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reghi, reglo;
+
+	if (!is_valid_ether_addr(mac_addr))
+		printk(KERN_WARNING PFX "Update hash table: Invalid hwaddr!\n");
+
+	reghi = mac_addr[0] << 24 | mac_addr[1] << 16 | mac_addr[2] << 8 | mac_addr[3];
+	reglo = mac_addr[4] << 8 | mac_addr[5];
+	reglo |= 0x10000000;	/* Set hash command */
+	reglo |= 0x40000000;	/* Set status busy */
+	reglo |= sta_id << 16;
+
+	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
+	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
+	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
+
+	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
+	if (!(reglo & 0x80000000))
+		printk(KERN_WARNING PFX "Update hash table failed\n");
+}
+
+void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id)
+{
+	void __iomem *ctl = priv->ctl;
+
+	reglo &= 0xFFFF;
+	reglo |= 0x20000000;
+	reglo |= 0x40000000;	/* Set status busy */
+	reglo |= sta_id << 16;
+
+	iowrite32(0, ctl + AGNX_RXM_HASH_CMD_FLAG);
+	iowrite32(reghi, ctl + AGNX_RXM_HASH_CMD_HIGH);
+	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
+	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
+
+	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
+	printk(PFX "RX hash cmd are : %.8x%.8x\n", reghi, reglo);
+
+}
+
+void hash_dump(struct agnx_priv *priv, u8 sta_id)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reghi, reglo;
+
+	reglo = 0x0;		/* dump command */
+	reglo |= 0x40000000;	/* status bit */
+	iowrite32(reglo, ctl + AGNX_RXM_HASH_CMD_LOW);
+	iowrite32(sta_id << 16, ctl + AGNX_RXM_HASH_DUMP_DATA);
+
+	udelay(80);
+
+	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_HIGH);
+	reglo = ioread32(ctl + AGNX_RXM_HASH_CMD_LOW);
+	printk(PFX "hash cmd are : %.8x%.8x\n", reghi, reglo);
+	reghi = ioread32(ctl + AGNX_RXM_HASH_CMD_FLAG);
+	printk(PFX "hash flag is : %.8x\n", reghi);
+	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_MST);
+	reglo = ioread32(ctl + AGNX_RXM_HASH_DUMP_LST);
+	printk(PFX "hash dump mst lst: %.8x%.8x\n", reghi, reglo);
+	reghi = ioread32(ctl + AGNX_RXM_HASH_DUMP_DATA);
+	printk(PFX "hash dump data: %.8x\n", reghi);
+}
+
+void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
+{
+	void __iomem *ctl = priv->ctl;
+	memcpy_fromio(power, ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
+		      sizeof(*power));
+}
+
+inline void
+set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx)
+{
+	void __iomem *ctl = priv->ctl;
+	/* FIXME   2. Write Template to offset + station number  */
+	memcpy_toio(ctl + AGNX_TXM_STAPOWTEMP + sizeof(*power) * sta_idx,
+		    power, sizeof(*power));
+}
+
+
+void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
+		   unsigned int sta_idx, unsigned int wq_idx)
+{
+	void __iomem *data = priv->data;
+	memcpy_fromio(tx_wq, data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
+		      sizeof(*tx_wq) * wq_idx,  sizeof(*tx_wq));
+
+}
+
+inline void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
+		   unsigned int sta_idx, unsigned int wq_idx)
+{
+	void __iomem *data = priv->data;
+	memcpy_toio(data + AGNX_PDU_TX_WQ + sizeof(*tx_wq) * STA_TX_WQ_NUM * sta_idx +
+		    sizeof(*tx_wq) * wq_idx, tx_wq, sizeof(*tx_wq));
+}
+
+
+void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
+{
+	void __iomem *data = priv->data;
+
+	memcpy_fromio(sta, data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
+		      sizeof(*sta));
+}
+
+inline void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx)
+{
+	void __iomem *data = priv->data;
+
+	memcpy_toio(data + AGNX_PDUPOOL + sizeof(*sta) * sta_idx,
+		    sta, sizeof(*sta));
+}
+
+/* FIXME */
+void sta_power_init(struct agnx_priv *priv, unsigned int sta_idx)
+{
+	struct agnx_sta_power power;
+	u32 reg;
+	AGNX_TRACE;
+
+	memset(&power, 0, sizeof(power));
+	reg = agnx_set_bits(EDCF, EDCF_SHIFT, 0x1);
+	power.reg = cpu_to_le32(reg);
+	set_sta_power(priv, &power, sta_idx);
+	udelay(40);
+} /* add_power_template */
+
+
+/* @sta_idx: index of the station as seen by the card */
+static void sta_tx_workqueue_init(struct agnx_priv *priv, unsigned int sta_idx)
+{
+	struct agnx_sta_tx_wq tx_wq;
+	u32 reg;
+	unsigned int i;
+
+	memset(&tx_wq, 0, sizeof(tx_wq));
+
+	reg = agnx_set_bits(WORK_QUEUE_VALID, WORK_QUEUE_VALID_SHIFT, 1);
+	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 1);
+//	reg |= agnx_set_bits(WORK_QUEUE_ACK_TYPE, WORK_QUEUE_ACK_TYPE_SHIFT, 0);
+	tx_wq.reg2 |= cpu_to_le32(reg);
+
+	/* Assume all 8 traffic classes are used */
+	for (i = 0; i < STA_TX_WQ_NUM; i++)
+		set_sta_tx_wq(priv, &tx_wq, sta_idx, i);
+} /* sta_tx_workqueue_init */
+
+
+static void sta_traffic_init(struct agnx_sta_traffic *traffic)
+{
+	u32 reg;
+	memset(traffic, 0, sizeof(*traffic));
+
+	reg = agnx_set_bits(NEW_PACKET, NEW_PACKET_SHIFT, 1);
+	reg |= agnx_set_bits(TRAFFIC_VALID, TRAFFIC_VALID_SHIFT, 1);
+//	reg |= agnx_set_bits(TRAFFIC_ACK_TYPE, TRAFFIC_ACK_TYPE_SHIFT, 1);
+	traffic->reg0 = cpu_to_le32(reg);
+
+	/* 3. Set the RX Sequence Number to 4095 */
+	reg = agnx_set_bits(RX_SEQUENCE_NUM, RX_SEQUENCE_NUM_SHIFT, 4095);
+	traffic->reg1 = cpu_to_le32(reg);
+}
+
+
+/* @sta_idx: index of the station as seen by the card */
+void sta_init(struct agnx_priv *priv, unsigned int sta_idx)
+{
+	/* FIXME struct agnx_sta is 256 bytes; is it safe to put
+	 * it on the stack? */
+	struct agnx_sta sta;
+	u32 reg;
+	int i;
+
+	memset(&sta, 0, sizeof(sta));
+	/* Set valid to 1 */
+	reg = agnx_set_bits(STATION_VALID, STATION_VALID_SHIFT, 1);
+	/* Set Enable Concatenation to 0 (?) */
+	reg |= agnx_set_bits(ENABLE_CONCATENATION, ENABLE_CONCATENATION_SHIFT, 0);
+	/* Set Enable Decompression to 0 (?) */
+	reg |= agnx_set_bits(ENABLE_DECOMPRESSION, ENABLE_DECOMPRESSION_SHIFT, 0);
+	sta.reg = cpu_to_le32(reg);
+
+	/* Initialize each of the Traffic Class Structures by: */
+	for (i = 0; i < 8; i++)
+		sta_traffic_init(sta.traffic + i);
+
+	set_sta(priv, &sta, sta_idx);
+	sta_tx_workqueue_init(priv, sta_idx);
+} /* sta_descriptor_init */
+
+
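
The RXM hash commands in sta.c above share one encoding: the low word carries the two low MAC bytes, an operation code (0x10000000 write, 0x20000000 delete, 0x30000000 read), the busy bit 0x40000000, and the station id shifted left by 16. A standalone sketch of the packing done by hash_write(), using stdint types in place of the kernel's u8/u32:

#include <stdint.h>
#include <stdio.h>

static void pack_hash_cmd(const uint8_t *mac, uint8_t sta_id,
			  uint32_t *reghi, uint32_t *reglo)
{
	*reghi = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	*reglo = mac[4] << 8 | mac[5];
	*reglo |= 0x10000000;		/* hash write command */
	*reglo |= 0x40000000;		/* status busy */
	*reglo |= (uint32_t)sta_id << 16;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t hi, lo;

	pack_hash_cmd(mac, 1, &hi, &lo);
	printf("%.8x%.8x\n", hi, lo);	/* prints 0011223350014455 */
	return 0;
}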

+ 222 - 0
drivers/staging/agnx/sta.h

@@ -0,0 +1,222 @@
+#ifndef AGNX_STA_H_
+#define AGNX_STA_H_
+
+#define STA_TX_WQ_NUM	8	/* The number of TX workqueues each STA has */
+
+struct agnx_hash_cmd {
+	__be32 cmdhi;
+#define MACLO		0xFFFF0000
+#define MACLO_SHIFT	16
+#define STA_ID		0x0000FFF0
+#define STA_ID_SHIFT	4
+#define CMD		0x0000000C
+#define CMD_SHIFT	2
+#define STATUS		0x00000002
+#define STATUS_SHIFT	1
+#define PASS		0x00000001
+#define PASS_SHIFT	0
+	__be32 cmdlo;
+} __attribute__((__packed__));
+
+
+/*
+ * Station Power Template
+ * FIXME Just for agn100 yet
+ */
+struct agnx_sta_power {
+	__le32 reg;
+#define SIGNAL			0x000000FF /* signal */
+#define SIGNAL_SHIFT		0
+#define RATE			0x00000F00
+#define RATE_SHIFT		8
+#define TIFS			0x00001000
+#define TIFS_SHIFT		12
+#define EDCF			0x00002000
+#define EDCF_SHIFT		13
+#define CHANNEL_BOND		0x00004000
+#define CHANNEL_BOND_SHIFT	14
+#define PHY_MODE		0x00038000
+#define PHY_MODE_SHIFT		15
+#define POWER_LEVEL		0x007C0000
+#define POWER_LEVEL_SHIFT	18
+#define NUM_TRANSMITTERS	0x00800000
+#define NUM_TRANSMITTERS_SHIFT	23
+} __attribute__((__packed__));
+
+/*
+ * TX Workqueue Descriptor
+ */
+struct agnx_sta_tx_wq {
+	__le32 reg0;
+#define HEAD_POINTER_LOW	0xFF000000 /* Head pointer low */
+#define HEAD_POINTER_LOW_SHIFT	24
+#define TAIL_POINTER		0x00FFFFFF /* Tail pointer */
+#define TAIL_POINTER_SHIFT	0
+
+	__le32 reg3;
+#define ACK_POINTER_LOW	        0xFFFF0000	/* ACK pointer low */
+#define ACK_POINTER_LOW_SHIFT	16
+#define HEAD_POINTER_HIGH	0x0000FFFF	/* Head pointer high */
+#define HEAD_POINTER_HIGH_SHIFT	0
+
+	__le32 reg1;
+/* ACK timeout tail packet count */
+#define ACK_TIMOUT_TAIL_PACK_CNT	0xFFF00000
+#define ACK_TIMOUT_TAIL_PACK_CNT_SHIFT	20
+/* Head timeout tail packet count */
+#define HEAD_TIMOUT_TAIL_PACK_CNT	0x000FFF00
+#define HEAD_TIMOUT_TAIL_PACK_CNT_SHIFT	8
+#define ACK_POINTER_HIGH	        0x000000FF /* ACK pointer high */
+#define ACK_POINTER_HIGH_SHIFT		0
+
+	__le32 reg2;
+#define WORK_QUEUE_VALID		0x80000000 /* valid */
+#define WORK_QUEUE_VALID_SHIFT		31
+#define WORK_QUEUE_ACK_TYPE		0x40000000 /* ACK type */
+#define WORK_QUEUE_ACK_TYPE_SHIFT	30
+/* Head timeout window limit fragmentation count */
+#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT	0x3FFF0000
+#define HEAD_TIMOUT_WIN_LIM_FRAG_CNT_SHIFT	16
+/* Head timeout window limit byte count */
+#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT	0x0000FFFF
+#define HEAD_TIMOUT_WIN_LIM_BYTE_CNT_SHIFT	 0
+} __attribute__((__packed__));
+
+
+/*
+ * Traffic Class Structure
+ */
+struct agnx_sta_traffic {
+	__le32 reg0;
+#define ACK_TIMOUT_CNT		0xFF800000 /* ACK Timeout Counts */
+#define ACK_TIMOUT_CNT_SHIFT	23
+#define TRAFFIC_ACK_TYPE	0x00600000 /* ACK Type */
+#define TRAFFIC_ACK_TYPE_SHIFT	21
+#define NEW_PACKET		0x00100000 /* New Packet  */
+#define NEW_PACKET_SHIFT	20
+#define TRAFFIC_VALID		0x00080000 /* Valid */
+#define TRAFFIC_VALID_SHIFT	19
+#define RX_HDR_DESC_POINTER	0x0007FFFF /* RX Header Descriptor pointer */
+#define RX_HDR_DESC_POINTER_SHIFT	 0
+
+	__le32 reg1;
+#define RX_PACKET_TIMESTAMP	0xFFFF0000 /* RX Packet Timestamp */
+#define RX_PACKET_TIMESTAMP_SHIFT	16
+#define TRAFFIC_RESERVED	0x0000E000 /* Reserved */
+#define TRAFFIC_RESERVED_SHIFT  13
+#define SV			0x00001000 /* sv */
+#define SV_SHIFT		12
+#define RX_SEQUENCE_NUM		0x00000FFF /* RX Sequence Number */
+#define RX_SEQUENCE_NUM_SHIFT	0
+
+	__le32 tx_replay_cnt_low; /* TX Replay Counter Low */
+
+	__le16 tx_replay_cnt_high; /* TX Replay Counter High */
+	__le16 rx_replay_cnt_high; /* RX Replay Counter High */
+
+	__be32 rx_replay_cnt_low; /* RX Replay Counter Low */
+} __attribute__((__packed__));
+
+/*
+ * Station Descriptors
+ */
+struct agnx_sta {
+	__le32 tx_session_keys[4]; /* Transmit Session Key (0-3) */
+	__le32 rx_session_keys[4]; /* Receive Session Key (0-3) */
+
+	__le32 reg;
+#define ID_1			0xC0000000 /* id 1 */
+#define ID_1_SHIFT		30
+#define ID_0			0x30000000 /* id 0 */
+#define ID_0_SHIFT		28
+#define ENABLE_CONCATENATION	0x0FF00000 /* Enable concatenation */
+#define ENABLE_CONCATENATION_SHIFT	20
+#define ENABLE_DECOMPRESSION	0x000FF000 /* Enable decompression */
+#define ENABLE_DECOMPRESSION_SHIFT	12
+#define STA_RESERVED		0x00000C00 /* Reserved */
+#define STA_RESERVED_SHIFT	10
+#define EAP			0x00000200 /* EAP */
+#define EAP_SHIFT		9
+#define ED_NULL			0x00000100 /* ED NULL */
+#define ED_NULL_SHIFT		8
+#define ENCRYPTION_POLICY	0x000000E0 /* Encryption Policy */
+#define ENCRYPTION_POLICY_SHIFT 5
+#define DEFINED_KEY_ID		0x00000018 /* Defined Key ID */
+#define DEFINED_KEY_ID_SHIFT	3
+#define FIXED_KEY		0x00000004 /* Fixed Key */
+#define FIXED_KEY_SHIFT		2
+#define KEY_VALID		0x00000002 /* Key Valid */
+#define KEY_VALID_SHIFT		1
+#define STATION_VALID		0x00000001 /* Station Valid */
+#define STATION_VALID_SHIFT	0
+
+	__le32 tx_aes_blks_unicast; /* TX AES Blks Unicast */
+	__le32 rx_aes_blks_unicast; /* RX AES Blks Unicast */
+
+	__le16 aes_format_err_unicast_cnt; /* AES Format Error Unicast Counts */
+	__le16 aes_replay_unicast; /* AES Replay Unicast */
+
+	__le16 aes_decrypt_err_unicast;	/* AES Decrypt Error Unicast */
+	__le16 aes_decrypt_err_default;	/* AES Decrypt Error default */
+
+	__le16 single_retry_packets; /* Single Retry Packets */
+	__le16 failed_tx_packets; /* Failed Tx Packets */
+
+	__le16 muti_retry_packets; /* Multiple Retry Packets */
+	__le16 ack_timeouts;	/* ACK Timeouts */
+
+	__le16 frag_tx_cnt;	/* Fragment TX Counts */
+	__le16 rts_brq_sent;	/* RTS Brq Sent */
+
+	__le16 tx_packets;	/* TX Packets */
+	__le16 cts_back_timeout; /* CTS Back Timeout */
+
+	__le32 phy_stats_high;	/* PHY Stats High */
+	__le32 phy_stats_low;	/* PHY Stats Low */
+
+	struct agnx_sta_traffic traffic[8];	/* Traffic Class Structure (8) */
+
+	__le16 traffic_class0_frag_success; /* Traffic Class 0 Fragment Success */
+	__le16 traffic_class1_frag_success; /* Traffic Class 1 Fragment Success */
+	__le16 traffic_class2_frag_success; /* Traffic Class 2 Fragment Success */
+	__le16 traffic_class3_frag_success; /* Traffic Class 3 Fragment Success */
+	__le16 traffic_class4_frag_success; /* Traffic Class 4 Fragment Success */
+	__le16 traffic_class5_frag_success; /* Traffic Class 5 Fragment Success */
+	__le16 traffic_class6_frag_success; /* Traffic Class 6 Fragment Success */
+	__le16 traffic_class7_frag_success; /* Traffic Class 7 Fragment Success */
+
+	__le16 num_frag_non_prime_rates; /* number of Fragments for non-prime rates */
+	__le16 ack_timeout_non_prime_rates; /* ACK Timeout for non-prime rates */
+
+} __attribute__((__packed__));
+
+
+struct agnx_beacon_hdr {
+	struct agnx_sta_power power; /* Tx Station Power Template  */
+	u8 phy_hdr[6];		/* PHY Hdr */
+	u8 frame_len_lo;	/* Frame Length Lo */
+	u8 frame_len_hi;	/* Frame Length Hi */
+	u8 mac_hdr[24];		/* MAC Header */
+	/* FIXME */
+	/* 802.11(abg) beacon */
+} __attribute__((__packed__));
+
+void hash_write(struct agnx_priv *priv, u8 *mac_addr, u8 sta_id);
+void hash_dump(struct agnx_priv *priv, u8 sta_id);
+void hash_read(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);
+void hash_delete(struct agnx_priv *priv, u32 reghi, u32 reglo, u8 sta_id);
+
+void get_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power, unsigned int sta_idx);
+void set_sta_power(struct agnx_priv *priv, struct agnx_sta_power *power,
+		   unsigned int sta_idx);
+void get_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
+		   unsigned int sta_idx, unsigned int wq_idx);
+void set_sta_tx_wq(struct agnx_priv *priv, struct agnx_sta_tx_wq *tx_wq,
+		   unsigned int sta_idx, unsigned int wq_idx);
+void get_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);
+void set_sta(struct agnx_priv *priv, struct agnx_sta *sta, unsigned int sta_idx);
+
+void sta_power_init(struct agnx_priv *priv, unsigned int num);
+void sta_init(struct agnx_priv *priv, unsigned int num);
+
+#endif /* AGNX_STA_H_ */
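
The mask/shift pairs above are meant to be combined with the generic helpers that xmit.h (later in this patch) defines as agnx_set_bits()/agnx_get_bits(). A standalone sketch composing a station power-template word; the field values here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define PHY_MODE		0x00038000
#define PHY_MODE_SHIFT		15
#define POWER_LEVEL		0x007C0000
#define POWER_LEVEL_SHIFT	18
#define NUM_TRANSMITTERS	0x00800000
#define NUM_TRANSMITTERS_SHIFT	23

/* same operation as agnx_set_bits() in xmit.h */
static uint32_t set_bits(uint32_t mask, uint8_t shift, uint32_t value)
{
	return (value << shift) & mask;
}

int main(void)
{
	uint32_t reg = 0;

	reg |= set_bits(PHY_MODE, PHY_MODE_SHIFT, 2);	/* invented mode */
	reg |= set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 20);
	reg |= set_bits(NUM_TRANSMITTERS, NUM_TRANSMITTERS_SHIFT, 1);
	printf("power reg = 0x%08x\n", reg);	/* 0x00d10000 */
	return 0;
}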

+ 168 - 0
drivers/staging/agnx/table.c

@@ -0,0 +1,168 @@
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "agnx.h"
+#include "debug.h"
+#include "phy.h"
+
+static const u32
+tx_fir_table[] = { 0x19, 0x5d, 0xce, 0x151, 0x1c3, 0x1ff, 0x1ea, 0x17c, 0xcf,
+		   0x19, 0x38e, 0x350, 0x362, 0x3ad, 0x5, 0x44, 0x59, 0x49,
+		   0x21, 0x3f7, 0x3e0, 0x3e3, 0x3f3, 0x0 };
+
+void tx_fir_table_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tx_fir_table); i++)
+		iowrite32(tx_fir_table[i], ctl + AGNX_FIR_BASE + i*4);
+} /* fir_table_setup */
+
+
+static const u32
+gain_table[] = { 0x8, 0x8, 0xf, 0x13, 0x17, 0x1b, 0x1f, 0x23, 0x27, 0x2b,
+		 0x2f, 0x33, 0x37, 0x3b, 0x3f, 0x43, 0x47, 0x4b, 0x4f,
+		 0x53, 0x57, 0x5b, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
+		 0x5f, 0x5f, 0x5f, 0x5f };
+
+void gain_table_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gain_table); i++) {
+		iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4);
+		iowrite32(gain_table[i], ctl + AGNX_GAIN_TABLE + i*4 + 0x80);
+	}
+} /* gain_table_init */
+
+void monitor_gain_table_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	unsigned int i;
+
+	for (i = 0; i < 0x44; i += 4) {
+		iowrite32(0x61, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0x61, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0x44; i < 0x64; i += 4) {
+		iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0x6e, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0x64; i < 0x94; i += 4) {
+		iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0x7a, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0x94; i < 0xdc; i += 4) {
+		iowrite32(0x87, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0x87, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0xdc; i < 0x148; i += 4) {
+		iowrite32(0x95, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0x95, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0x148; i < 0x1e8; i += 4) {
+		iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0xa2, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+	for (i = 0x1e8; i <= 0x1fc; i += 4) {
+		iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + i);
+		iowrite32(0xb0, ctl + AGNX_MONGCR_BASE + 0x200 + i);
+	}
+} /* monitor_gain_table_init */
+
+
+void routing_table_init(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	unsigned int type, subtype;
+	u32 reg;
+
+	disable_receiver(priv);
+
+	for (type = 0; type < 0x3; type++) {
+		for (subtype = 0; subtype < 0x10; subtype++) {
+			/* 1. Set Routing table to R/W and to Return status on Read */
+			reg = (type << ROUTAB_TYPE_SHIFT) |
+				(subtype << ROUTAB_SUBTYPE_SHIFT);
+			reg |= (1 << ROUTAB_RW_SHIFT) | (1 << ROUTAB_STATUS_SHIFT);
+			if (type == ROUTAB_TYPE_DATA) {
+				/* NULL goes to RFP */
+				if (subtype == ROUTAB_SUBTYPE_NULL)
+//					reg |= ROUTAB_ROUTE_RFP;
+					reg |= ROUTAB_ROUTE_CPU;
+				/* QOS NULL goes to CPU */
+				else if (subtype == ROUTAB_SUBTYPE_QOSNULL)
+					reg |= ROUTAB_ROUTE_CPU;
+				/* All Data and QOS data subtypes go to Encryption */
+				else if ((subtype == ROUTAB_SUBTYPE_DATA) ||
+					 (subtype == ROUTAB_SUBTYPE_DATAACK) ||
+					 (subtype == ROUTAB_SUBTYPE_DATAPOLL) ||
+					 (subtype == ROUTAB_SUBTYPE_DATAPOLLACK) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSDATA) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSDATAACK) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSDATAPOLL) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSDATAACKPOLL))
+					reg |= ROUTAB_ROUTE_ENCRY;
+//					reg |= ROUTAB_ROUTE_CPU;
+				/*Drop NULL and QOS NULL ack, poll and poll ack*/
+				else if ((subtype == ROUTAB_SUBTYPE_NULLACK) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSNULLACK) ||
+					 (subtype == ROUTAB_SUBTYPE_NULLPOLL) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLL) ||
+					 (subtype == ROUTAB_SUBTYPE_NULLPOLLACK) ||
+					 (subtype == ROUTAB_SUBTYPE_QOSNULLPOLLACK))
+//					reg |= ROUTAB_ROUTE_DROP;
+					reg |= ROUTAB_ROUTE_CPU;
+			} else
+				reg |= ROUTAB_ROUTE_CPU;
+			iowrite32(reg, ctl + AGNX_RXM_ROUTAB);
+			/* Check to verify that the status bit cleared */
+			routing_table_delay();
+		}
+	}
+	enable_receiver(priv);
+} /* routing_table_init */
+
+void tx_engine_lookup_tbl_init(struct agnx_priv *priv)
+{
+	void __iomem *data = priv->data;
+	unsigned int i;
+
+	for (i = 0; i <= 28; i += 4)
+		iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
+	for (i = 32; i <= 120; i += 8) {
+		iowrite32(0x1e58, data + AGNX_ENGINE_LOOKUP_TBL + i);
+		iowrite32(0xb00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
+	}
+
+	for (i = 128; i <= 156; i += 4)
+		iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
+	for (i = 160; i <= 248; i += 8) {
+		iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i);
+		iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
+	}
+
+	for (i = 256; i <= 284; i += 4)
+		iowrite32(0x980c, data + AGNX_ENGINE_LOOKUP_TBL + i);
+	for (i = 288; i <= 376; i += 8) {
+		iowrite32(0x1a58, data + AGNX_ENGINE_LOOKUP_TBL + i);
+		iowrite32(0x1858, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
+	}
+
+	for (i = 512; i <= 540; i += 4)
+		iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i);
+	for (i = 544; i <= 632; i += 8) {
+		iowrite32(0x2058, data + AGNX_ENGINE_LOOKUP_TBL + i);
+		iowrite32(0xc00c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
+	}
+
+	for (i = 640; i <= 668; i += 4)
+		iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i);
+	for (i = 672; i <= 764; i += 8) {
+		iowrite32(0x2258, data + AGNX_ENGINE_LOOKUP_TBL + i);
+		iowrite32(0xc80c, data + AGNX_ENGINE_LOOKUP_TBL + i + 4);
+	}
+}
+
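
monitor_gain_table_init() above writes seven contiguous offset ranges with fixed gain values, each mirrored at +0x200. The same data can be expressed as a range table; a standalone sketch with write32() as a stub standing in for iowrite32() against AGNX_MONGCR_BASE:

#include <stdint.h>
#include <stdio.h>

struct gain_range {
	unsigned int start, end;	/* byte offsets, end inclusive */
	uint32_t val;
};

/* offsets and values transcribed from monitor_gain_table_init() */
static const struct gain_range ranges[] = {
	{ 0x000, 0x040, 0x61 }, { 0x044, 0x060, 0x6e },
	{ 0x064, 0x090, 0x7a }, { 0x094, 0x0d8, 0x87 },
	{ 0x0dc, 0x144, 0x95 }, { 0x148, 0x1e4, 0xa2 },
	{ 0x1e8, 0x1fc, 0xb0 },
};

static void write32(unsigned int off, uint32_t val)
{
	printf("0x%02x -> +0x%03x\n", val, off);	/* stub for iowrite32 */
}

int main(void)
{
	unsigned int r, i;

	for (r = 0; r < sizeof(ranges) / sizeof(ranges[0]); r++)
		for (i = ranges[r].start; i <= ranges[r].end; i += 4) {
			write32(i, ranges[r].val);
			write32(i + 0x200, ranges[r].val);	/* mirror */
		}
	return 0;
}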

+ 10 - 0
drivers/staging/agnx/table.h

@@ -0,0 +1,10 @@
+#ifndef AGNX_TABLE_H_
+#define AGNX_TABLE_H_
+
+void tx_fir_table_init(struct agnx_priv *priv);
+void gain_table_init(struct agnx_priv *priv);
+void monitor_gain_table_init(struct agnx_priv *priv);
+void routing_table_init(struct agnx_priv *priv);
+void tx_engine_lookup_tbl_init(struct agnx_priv *priv);
+
+#endif /* AGNX_TABLE_H_ */

+ 819 - 0
drivers/staging/agnx/xmit.c

@@ -0,0 +1,819 @@
+/**
+ * Airgo MIMO wireless driver
+ *
+ * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
+ *
+ * Thanks to Jeff Williams <angelbane@gmail.com> for the reverse engineering
+ * work and for publishing the SPECS at http://airgo.wdwconsulting.net/mymoin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "agnx.h"
+#include "debug.h"
+#include "phy.h"
+
+unsigned int rx_frame_cnt = 0;
+//unsigned int local_tx_sent_cnt = 0;
+
+static inline void disable_rx_engine(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	iowrite32(0x100, ctl + AGNX_CIR_RXCTL);
+	/* Wait for RX Control to have the Disable Rx Interrupt (0x100) set */
+	ioread32(ctl + AGNX_CIR_RXCTL);
+}
+
+static inline void enable_rx_engine(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	iowrite32(0x80, ctl + AGNX_CIR_RXCTL);
+	ioread32(ctl + AGNX_CIR_RXCTL);
+}
+
+inline void disable_rx_interrupt(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	disable_rx_engine(priv);
+	reg = ioread32(ctl + AGNX_CIR_RXCFG);
+	reg &= ~0x20;
+	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
+	ioread32(ctl + AGNX_CIR_RXCFG);
+}
+
+inline void enable_rx_interrupt(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	reg = ioread32(ctl + AGNX_CIR_RXCFG);
+	reg |= 0x20;
+	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
+	ioread32(ctl + AGNX_CIR_RXCFG);
+	enable_rx_engine(priv);
+}
+
+static inline void rx_desc_init(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_desc *desc = priv->rx.desc + idx;
+	struct agnx_info *info = priv->rx.info + idx;
+
+	memset(info, 0, sizeof(*info));
+
+	info->dma_len = IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct agnx_hdr);
+	info->skb = dev_alloc_skb(info->dma_len);
+	if (info->skb == NULL)
+		agnx_bug("refill err");
+
+	info->mapping = pci_map_single(priv->pdev, skb_tail_pointer(info->skb),
+				       info->dma_len, PCI_DMA_FROMDEVICE);
+	memset(desc, 0, sizeof(*desc));
+	desc->dma_addr = cpu_to_be32(info->mapping);
+	/* Set the owner to the card */
+	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
+}
+
+static inline void rx_desc_reinit(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_info *info = priv->rx.info + idx;
+
+	/* The ieee80211 layer will free the skb, so we need not free it here */
+	pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
+	rx_desc_init(priv, idx);
+}
+
+static inline void rx_desc_reusing(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_desc *desc = priv->rx.desc + idx;
+	struct agnx_info *info = priv->rx.info + idx;
+
+	memset(desc, 0, sizeof(*desc));
+	desc->dma_addr = cpu_to_be32(info->mapping);
+	/* Set the owner to the card */
+	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
+}
+
+static void rx_desc_free(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_desc *desc = priv->rx.desc + idx;
+	struct agnx_info *info = priv->rx.info + idx;
+
+	BUG_ON(!desc || !info);
+	if (info->mapping)
+		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
+	if (info->skb)
+		dev_kfree_skb(info->skb);
+	memset(info, 0, sizeof(*info));
+	memset(desc, 0, sizeof(*desc));
+}
+
+static inline void __tx_desc_free(struct agnx_priv *priv,
+				  struct agnx_desc *desc, struct agnx_info *info)
+{
+	BUG_ON(!desc || !info);
+	/* TODO make sure mapping, skb and len are consistent */
+	if (info->mapping)
+		pci_unmap_single(priv->pdev, info->mapping,
+				 info->dma_len, PCI_DMA_TODEVICE);
+	if (info->type == PACKET)
+		dev_kfree_skb(info->skb);
+
+	memset(info, 0, sizeof(*info));
+	memset(desc, 0, sizeof(*desc));
+}
+
+static void txm_desc_free(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_desc *desc = priv->txm.desc + idx;
+	struct agnx_info *info = priv->txm.info + idx;
+
+	__tx_desc_free(priv, desc, info);
+}
+
+static void txd_desc_free(struct agnx_priv *priv, unsigned int idx)
+{
+	struct agnx_desc *desc = priv->txd.desc + idx;
+	struct agnx_info *info = priv->txd.info + idx;
+
+	__tx_desc_free(priv, desc, info);
+}
+
+int fill_rings(struct agnx_priv *priv)
+{
+	void __iomem *ctl = priv->ctl;
+	unsigned int i;
+	u32 reg;
+	AGNX_TRACE;
+
+	priv->txd.idx_sent = priv->txm.idx_sent = 0;
+	priv->rx.idx = priv->txm.idx = priv->txd.idx = 0;
+
+	for (i = 0; i < priv->rx.size; i++)
+		rx_desc_init(priv, i);
+	for (i = 0; i < priv->txm.size; i++) {
+		memset(priv->txm.desc + i, 0, sizeof(struct agnx_desc));
+		memset(priv->txm.info + i, 0, sizeof(struct agnx_info));
+	}
+	for (i = 0; i < priv->txd.size; i++) {
+		memset(priv->txd.desc + i, 0, sizeof(struct agnx_desc));
+		memset(priv->txd.info + i, 0, sizeof(struct agnx_info));
+	}
+
+	/* FIXME Set the card RX TXM and TXD address */
+	agnx_write32(ctl, AGNX_CIR_RXCMSTART, priv->rx.dma);
+	agnx_write32(ctl, AGNX_CIR_RXCMEND, priv->txm.dma);
+
+	agnx_write32(ctl, AGNX_CIR_TXMSTART, priv->txm.dma);
+	agnx_write32(ctl, AGNX_CIR_TXMEND, priv->txd.dma);
+
+	agnx_write32(ctl, AGNX_CIR_TXDSTART, priv->txd.dma);
+	agnx_write32(ctl, AGNX_CIR_TXDEND, priv->txd.dma +
+		     sizeof(struct agnx_desc) * priv->txd.size);
+
+	/* FIXME Relinquish control of rings to card */
+	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
+	reg &= ~0x800;
+	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);
+	return 0;
+} /* fill_rings */
+
+void unfill_rings(struct agnx_priv *priv)
+{
+	unsigned long flags;
+	unsigned int i;
+	AGNX_TRACE;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	for (i = 0; i < priv->rx.size; i++)
+		rx_desc_free(priv, i);
+	for (i = 0; i < priv->txm.size; i++)
+		txm_desc_free(priv, i);
+	for (i = 0; i < priv->txd.size; i++)
+		txd_desc_free(priv, i);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Extract the bitrate out of a CCK PLCP header.
+   copy from bcm43xx driver */
+static inline u8 agnx_plcp_get_bitrate_cck(__be32 *phyhdr_11b)
+{
+	/* FIXME */
+	switch (*(u8 *)phyhdr_11b) {
+	case 0x0A:
+		return 0;
+	case 0x14:
+		return 1;
+	case 0x37:
+		return 2;
+	case 0x6E:
+		return 3;
+	}
+	agnx_bug("Wrong plcp rate");
+	return 0;
+}
+
+/* FIXME */
+static inline u8 agnx_plcp_get_bitrate_ofdm(__be32 *phyhdr_11g)
+{
+	u8 rate = *(u8 *)phyhdr_11g & 0xF;
+
+	printk(PFX "G mode rate is 0x%x\n", rate);
+	return rate;
+}
+
+/* FIXME */
+static void get_rx_stats(struct agnx_priv *priv, struct agnx_hdr *hdr,
+			 struct ieee80211_rx_status *stat)
+{
+	void __iomem *ctl = priv->ctl;
+	u8 *rssi;
+	u32 noise;
+	/* FIXME just for test */
+	int snr = 40;		/* signal-to-noise ratio */
+
+	memset(stat, 0, sizeof(*stat));
+	/* RSSI */
+	rssi = (u8 *)&hdr->phy_stats_lo;
+//	stat->ssi = (rssi[0] + rssi[1] + rssi[2]) / 3;
+	/* Noise */
+	noise = ioread32(ctl + AGNX_GCR_NOISE0);
+	noise += ioread32(ctl + AGNX_GCR_NOISE1);
+	noise += ioread32(ctl + AGNX_GCR_NOISE2);
+	stat->noise = noise / 3;
+	/* Signal quality */
+	//snr = stat->ssi - stat->noise;
+	if (snr >= 0 && snr < 40)
+		stat->signal = 5 * snr / 2;
+	else if (snr >= 40)
+		stat->signal = 100;
+	else
+		stat->signal = 0;
+
+
+	if (hdr->_11b0 && !hdr->_11g0) {
+		stat->rate_idx = agnx_plcp_get_bitrate_cck(&hdr->_11b0);
+	} else if (!hdr->_11b0 && hdr->_11g0) {
+		printk(PFX "RX: Found G mode packet\n");
+		stat->rate_idx = agnx_plcp_get_bitrate_ofdm(&hdr->_11g0);
+	} else
+		agnx_bug("Unknown packets type");
+
+
+	stat->band = IEEE80211_BAND_2GHZ;
+	stat->freq = agnx_channels[priv->channel - 1].center_freq;
+//	stat->antenna = 3;
+//	stat->mactime = be32_to_cpu(hdr->time_stamp);
+//	stat->channel = priv->channel;
+
+}
+
+static inline void combine_hdr_frag(struct ieee80211_hdr *ieeehdr,
+				    struct sk_buff *skb)
+{
+	u16 fctl;
+	unsigned int hdrlen;
+
+	fctl = le16_to_cpu(ieeehdr->frame_control);
+	hdrlen = ieee80211_hdrlen(fctl);
+	/* FIXME */
+	if (hdrlen < (2+2+6)/*minimum hdr*/ ||
+	    hdrlen > sizeof(struct ieee80211_mgmt)) {
+		printk(KERN_ERR PFX "hdr len is %d\n", hdrlen);
+		agnx_bug("Wrong ieee80211 hdr detected");
+	}
+	skb_push(skb, hdrlen);
+	memcpy(skb->data, ieeehdr, hdrlen);
+} /* combine_hdr_frag */
+
+static inline int agnx_packet_check(struct agnx_priv *priv, struct agnx_hdr *agnxhdr,
+				    unsigned packet_len)
+{
+	if (agnx_get_bits(CRC_FAIL, CRC_FAIL_SHIFT, be32_to_cpu(agnxhdr->reg1)) == 1) {
+		printk(PFX "RX: CRC check fail\n");
+		goto drop;
+	}
+	if (packet_len > 2048) {
+		printk(PFX "RX: Too long packet detected\n");
+		goto drop;
+	}
+
+	/* FIXME Only usable in promiscuous mode; for managed mode, exclude the FCS */
+/* 	if (packet_len - sizeof(*agnxhdr) < FCS_LEN) { */
+/* 		printk(PFX "RX: Too short packet detected\n"); */
+/* 		goto drop; */
+/* 	} */
+	return 0;
+drop:
+	priv->stats.dot11FCSErrorCount++;
+	return -1;
+}
+
+void handle_rx_irq(struct agnx_priv *priv)
+{
+	struct ieee80211_rx_status status;
+	unsigned int len;
+//	AGNX_TRACE;
+
+	do {
+		struct agnx_desc *desc;
+		u32 frag;
+		struct agnx_info *info;
+		struct agnx_hdr *hdr;
+		struct sk_buff *skb;
+		unsigned int i = priv->rx.idx % priv->rx.size;
+
+		desc = priv->rx.desc + i;
+		frag = be32_to_cpu(desc->frag);
+		if (frag & OWNER)
+			break;
+
+		info = priv->rx.info + i;
+		skb = info->skb;
+		hdr = (struct agnx_hdr *)(skb->data);
+
+		len = (frag & PACKET_LEN) >> PACKET_LEN_SHIFT;
+		if (agnx_packet_check(priv, hdr, len) == -1) {
+ 			rx_desc_reusing(priv, i);
+			continue;
+		}
+		skb_put(skb, len);
+
+		do {
+			u16 fctl;
+			fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr)->frame_control);
+			if ((fctl & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_BEACON)// && !(fctl & IEEE80211_STYPE_BEACON))
+				dump_ieee80211_hdr((struct ieee80211_hdr *)hdr->mac_hdr, "RX");
+		} while (0);
+
+		if (hdr->_11b0 && !hdr->_11g0) {
+/* 			int j; */
+/* 			u16 fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr) */
+/* 					       ->frame_control); */
+/* 			if ( (fctl & IEEE80211_FCTL_FTYPE) ==  IEEE80211_FTYPE_DATA) { */
+/* 				agnx_print_rx_hdr(hdr); */
+// 				agnx_print_sta(priv, BSSID_STAID);
+/* 				for (j = 0; j < 8; j++) */
+/* 					agnx_print_sta_tx_wq(priv, BSSID_STAID, j);		 */
+/* 			} */
+
+			get_rx_stats(priv, hdr, &status);
+			skb_pull(skb, sizeof(*hdr));
+			combine_hdr_frag((struct ieee80211_hdr *)hdr->mac_hdr, skb);
+		} else if (!hdr->_11b0 && hdr->_11g0) {
+//			int j;
+			agnx_print_rx_hdr(hdr);
+			agnx_print_sta(priv, BSSID_STAID);
+//			for (j = 0; j < 8; j++)
+			agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);
+
+			print_hex_dump_bytes("agnx: RX_PACKET: ", DUMP_PREFIX_NONE,
+					     skb->data, skb->len + 8);
+
+//			if (agnx_plcp_get_bitrate_ofdm(&hdr->_11g0) == 0)
+			get_rx_stats(priv, hdr, &status);
+			skb_pull(skb, sizeof(*hdr));
+			combine_hdr_frag((struct ieee80211_hdr *)
+					 ((void *)&hdr->mac_hdr), skb);
+//			dump_ieee80211_hdr((struct ieee80211_hdr *)skb->data, "RX G");
+		} else
+			agnx_bug("Unknown packets type");
+		ieee80211_rx_irqsafe(priv->hw, skb, &status);
+		rx_desc_reinit(priv, i);
+
+	} while (priv->rx.idx++);
+} /* handle_rx_irq */
+
+static inline void handle_tx_irq(struct agnx_priv *priv, struct agnx_ring *ring)
+{
+	struct agnx_desc *desc;
+	struct agnx_info *info;
+	unsigned int idx;
+
+	for (idx = ring->idx_sent; idx < ring->idx; idx++) {
+		unsigned int i = idx % ring->size;
+		u32  frag;
+
+		desc = ring->desc + i;
+		info = ring->info + i;
+
+		frag = be32_to_cpu(desc->frag);
+		if (frag & OWNER) {
+			if (info->type == HEADER)
+				break;
+			else
+				agnx_bug("TX error");
+		}
+
+		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_TODEVICE);
+
+		do {
+//			int j;
+			size_t len;
+			len = info->skb->len - sizeof(struct agnx_hdr) + info->hdr_len;
+			//	if (len == 614) {
+//				agnx_print_desc(desc);
+				if (info->type == PACKET) {
+//					agnx_print_tx_hdr((struct agnx_hdr *)info->skb->data);
+/* 					agnx_print_sta_power(priv, LOCAL_STAID); */
+/* 					agnx_print_sta(priv, LOCAL_STAID); */
+/* //					for (j = 0; j < 8; j++) */
+/* 					agnx_print_sta_tx_wq(priv, LOCAL_STAID, 0); */
+//					agnx_print_sta_power(priv, BSSID_STAID);
+//					agnx_print_sta(priv, BSSID_STAID);
+//					for (j = 0; j < 8; j++)
+//					agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);
+				}
+//			}
+		} while (0);
+
+		if (info->type == PACKET) {
+//			dump_txm_registers(priv);
+//			dump_rxm_registers(priv);
+//			dump_bm_registers(priv);
+//			dump_cir_registers(priv);
+		}
+
+		if (info->type == PACKET) {
+//			struct ieee80211_hdr *hdr;
+			struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(info->skb);
+
+			skb_pull(info->skb, sizeof(struct agnx_hdr));
+			memcpy(skb_push(info->skb, info->hdr_len), &info->hdr, info->hdr_len);
+
+//			dump_ieee80211_hdr((struct ieee80211_hdr *)info->skb->data, "TX_HANDLE");
+/* 			print_hex_dump_bytes("agnx: TX_HANDLE: ", DUMP_PREFIX_NONE, */
+/* 					     info->skb->data, info->skb->len); */
+
+			if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK))
+				txi->flags |= IEEE80211_TX_STAT_ACK;
+
+			ieee80211_tx_status_irqsafe(priv->hw, info->skb);
+
+
+/* 				info->tx_status.queue_number = (ring->size - i) / 2; */
+/* 				ieee80211_tx_status_irqsafe(priv->hw, info->skb, &(info->tx_status)); */
+/* 			} else */
+/* 				dev_kfree_skb_irq(info->skb); */
+ 		}
+		memset(desc, 0, sizeof(*desc));
+		memset(info, 0, sizeof(*info));
+	}
+
+	ring->idx_sent = idx;
+	/* TODO fill the priv->low_level_stats */
+
+	/* ieee80211_wake_queue(priv->hw, 0); */
+}
+
+void handle_txm_irq(struct agnx_priv *priv)
+{
+	handle_tx_irq(priv, &priv->txm);
+}
+
+void handle_txd_irq(struct agnx_priv *priv)
+{
+	handle_tx_irq(priv, &priv->txd);
+}
+
+void handle_other_irq(struct agnx_priv *priv)
+{
+//	void __iomem *ctl = priv->ctl;
+	u32 status = priv->irq_status;
+	void __iomem *ctl = priv->ctl;
+	u32 reg;
+
+	if (status & IRQ_TX_BEACON) {
+		iowrite32(IRQ_TX_BEACON, ctl + AGNX_INT_STAT);
+		printk(PFX "IRQ: TX Beacon control is 0X%.8X\n", ioread32(ctl + AGNX_TXM_BEACON_CTL));
+		printk(PFX "IRQ: TX Beacon rx frame num: %d\n", rx_frame_cnt);
+	}
+	if (status & IRQ_TX_RETRY) {
+		reg = ioread32(ctl + AGNX_TXM_RETRYSTAID);
+		printk(PFX "IRQ: TX Retry, RETRY STA ID is %x\n", reg);
+	}
+	if (status & IRQ_TX_ACTIVITY)
+		printk(PFX "IRQ: TX Activity\n");
+	if (status & IRQ_RX_ACTIVITY)
+		printk(PFX "IRQ: RX Activity\n");
+	if (status & IRQ_RX_X)
+		printk(PFX "IRQ: RX X\n");
+	if (status & IRQ_RX_Y) {
+		reg = ioread32(ctl + AGNX_INT_MASK);
+		reg &= ~IRQ_RX_Y;
+		iowrite32(reg, ctl + AGNX_INT_MASK);
+		iowrite32(IRQ_RX_Y, ctl + AGNX_INT_STAT);
+		printk(PFX "IRQ: RX Y\n");
+	}
+	if (status & IRQ_RX_HASHHIT)  {
+		reg = ioread32(ctl + AGNX_INT_MASK);
+		reg &= ~IRQ_RX_HASHHIT;
+		iowrite32(reg, ctl + AGNX_INT_MASK);
+		iowrite32(IRQ_RX_HASHHIT, ctl + AGNX_INT_STAT);
+		printk(PFX "IRQ: RX Hash Hit\n");
+
+	}
+	if (status & IRQ_RX_FRAME) {
+		reg = ioread32(ctl + AGNX_INT_MASK);
+		reg &= ~IRQ_RX_FRAME;
+		iowrite32(reg, ctl + AGNX_INT_MASK);
+		iowrite32(IRQ_RX_FRAME, ctl + AGNX_INT_STAT);
+		printk(PFX "IRQ: RX Frame\n");
+ 		rx_frame_cnt++;
+	}
+	if (status & IRQ_ERR_INT) {
+		iowrite32(IRQ_ERR_INT, ctl + AGNX_INT_STAT);
+//		agnx_hw_reset(priv);
+		printk(PFX "IRQ: Error Interrupt\n");
+	}
+	if (status & IRQ_TX_QUE_FULL)
+		printk(PFX "IRQ: TX Workqueue Full\n");
+	if (status & IRQ_BANDMAN_ERR)
+		printk(PFX "IRQ: Bandwidth Management Error\n");
+	if (status & IRQ_TX_DISABLE)
+		printk(PFX "IRQ: TX Disable\n");
+	if (status & IRQ_RX_IVASESKEY)
+		printk(PFX "IRQ: RX Invalid Session Key\n");
+	if (status & IRQ_REP_THHIT)
+		printk(PFX "IRQ: Replay Threshold Hit\n");
+	if (status & IRQ_TIMER1)
+		printk(PFX "IRQ: Timer1\n");
+	if (status & IRQ_TIMER_CNT)
+		printk(PFX "IRQ: Timer Count\n");
+	if (status & IRQ_PHY_FASTINT)
+		printk(PFX "IRQ: Phy Fast Interrupt\n");
+	if (status & IRQ_PHY_SLOWINT)
+		printk(PFX "IRQ: Phy Slow Interrupt\n");
+	if (status & IRQ_OTHER)
+		printk(PFX "IRQ: 0x80000000\n");
+} /* handle_other_irq */
+
+
+static inline void route_flag_set(struct agnx_hdr *txhdr)
+{
+//	u32 reg = 0;
+
+	/* FIXME */
+/*  	reg = (0x7 << ROUTE_COMPRESSION_SHIFT) & ROUTE_COMPRESSION; */
+/* 	txhdr->reg5 = cpu_to_be32(reg); */
+ 	txhdr->reg5 = (0xa << 0x0) | (0x7 << 0x18);
+// 	txhdr->reg5 = cpu_to_be32((0xa << 0x0) | (0x7 << 0x18));
+// 	txhdr->reg5 = cpu_to_be32(0x7 << 0x0);
+}
+
+/* Return 0 if no match */
+static inline unsigned int get_power_level(unsigned int rate, unsigned int antennas_num)
+{
+	unsigned int power_level;
+
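+	/* The rates below appear to be in mac80211's 100 kbit/s units:
+	 * 10 is 1 Mbit/s, 55 is 5.5 Mbit/s, 540 is 54 Mbit/s. */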
+	switch (rate) {
+	case 10:
+	case 20:
+	case 55:
+	case 60:
+	case 90:
+	case 120: power_level = 22; break;
+	case 180: power_level = 19; break;
+	case 240: power_level = 18; break;
+	case 360: power_level = 16; break;
+	case 480: power_level = 15; break;
+	case 540: power_level = 14; break;
+	default:
+		power_level = 0;
+		agnx_bug("Error rate setting\n");
+	}
+
+	if (power_level && (antennas_num == 2))
+		power_level -= 3;
+
+	return power_level;
+}
+
+static inline void fill_agnx_hdr(struct agnx_priv *priv, struct agnx_info *tx_info)
+{
+	struct agnx_hdr *txhdr = (struct agnx_hdr *)tx_info->skb->data;
+	size_t len;
+	u16 fc = le16_to_cpu(*(__le16 *)&tx_info->hdr);
+	u32 reg;
+
+	memset(txhdr, 0, sizeof(*txhdr));
+
+//	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, LOCAL_STAID);
+	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, BSSID_STAID);
+	reg |= agnx_set_bits(WORKQUEUE_ID, WORKQUEUE_ID_SHIFT, 0);
+	txhdr->reg4 = cpu_to_be32(reg);
+
+	/* Set the Hardware Sequence Number to 1? */
+	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 0);
+//	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 1);
+	reg |= agnx_set_bits(MAC_HDR_LEN, MAC_HDR_LEN_SHIFT, tx_info->hdr_len);
+	txhdr->reg1 = cpu_to_be32(reg);
+	/* Set the agnx_hdr's MAC header */
+	memcpy(txhdr->mac_hdr, &tx_info->hdr, tx_info->hdr_len);
+
+	reg = agnx_set_bits(ACK, ACK_SHIFT, 1);
+//	reg = agnx_set_bits(ACK, ACK_SHIFT, 0);
+	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 0);
+//	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 1);
+	reg |= agnx_set_bits(RELAY, RELAY_SHIFT, 0);
+	reg |= agnx_set_bits(TM, TM_SHIFT, 0);
+	txhdr->reg0 = cpu_to_be32(reg);
+
+	/* Set the long and short retry limits */
+ 	txhdr->tx.short_retry_limit = tx_info->txi->control.rates[0].count;
+ 	txhdr->tx.long_retry_limit = tx_info->txi->control.rates[0].count;
+
+	/* FIXME */
+	len = tx_info->skb->len - sizeof(*txhdr) + tx_info->hdr_len + FCS_LEN;
+	if (fc & IEEE80211_FCTL_PROTECTED)
+		len += 8;
+	len = 2398;
+	reg = agnx_set_bits(FRAG_SIZE, FRAG_SIZE_SHIFT, len);
+	len = tx_info->skb->len - sizeof(*txhdr);
+	reg |= agnx_set_bits(PAYLOAD_LEN, PAYLOAD_LEN_SHIFT, len);
+	txhdr->reg3 = cpu_to_be32(reg);
+
+	route_flag_set(txhdr);
+} /* fill_hdr */
+
+static void txm_power_set(struct agnx_priv *priv,
+			  struct ieee80211_tx_info *txi)
+{
+	struct agnx_sta_power power;
+	u32 reg;
+
+	/* FIXME */
+	if (txi->control.rates[0].idx < 0) {
+		/* For B mode Short Preamble */
+		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_SHORT);
+//		control->tx_rate = -control->tx_rate;
+	} else
+		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211G);
+//		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_LONG);
+	reg |= agnx_set_bits(SIGNAL, SIGNAL_SHIFT, 0xB);
+	reg |= agnx_set_bits(RATE, RATE_SHIFT, 0xB);
+//	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 15);
+	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 20);
+	/* if rate < 11M set it to 0 */
+	reg |= agnx_set_bits(NUM_TRANSMITTERS, NUM_TRANSMITTERS_SHIFT, 1);
+//	reg |= agnx_set_bits(EDCF, EDCF_SHIFT, 1);
+//	reg |= agnx_set_bits(TIFS, TIFS_SHIFT, 1);
+
+	power.reg = reg;
+//	power.reg = cpu_to_le32(reg);
+
+//	set_sta_power(priv, &power, LOCAL_STAID);
+	set_sta_power(priv, &power, BSSID_STAID);
+}
+
+static inline int tx_packet_check(struct sk_buff *skb)
+{
+	unsigned int ieee_len = ieee80211_get_hdrlen_from_skb(skb);
+	if (skb->len > 2048) {
+		printk(KERN_ERR PFX "length is %d\n", skb->len);
+		agnx_bug("Too long TX skb");
+		return -1;
+	}
+	/* FIXME */
+	if (skb->len == ieee_len) {
+		printk(PFX "A strange TX packet\n");
+		return -1;
+		/* tx_faile_irqsafe(); */
+	}
+	return 0;
+}
+
+static int __agnx_tx(struct agnx_priv *priv, struct sk_buff *skb,
+		     struct agnx_ring *ring)
+{
+	struct agnx_desc *hdr_desc, *frag_desc;
+	struct agnx_info *hdr_info, *frag_info;
+	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* The RX interrupt needs to stay disabled until this TX packet
+	   is handled by the next TX interrupt */
+	disable_rx_interrupt(priv);
+
+	i = ring->idx;
+	ring->idx += 2;
+/*   	if (priv->txm_idx - priv->txm_idx_sent == AGNX_TXM_RING_SIZE - 2) */
+/* 		ieee80211_stop_queue(priv->hw, 0); */
+
+	/* Set agnx header's info and desc */
+	i %= ring->size;
+	hdr_desc = ring->desc + i;
+	hdr_info = ring->info + i;
+	hdr_info->hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	memcpy(&hdr_info->hdr, skb->data, hdr_info->hdr_len);
+
+	/* Add the agnx header to the front of the SKB */
+	skb_push(skb, sizeof(struct agnx_hdr) - hdr_info->hdr_len);
+
+	hdr_info->txi = txi;
+	hdr_info->dma_len = sizeof(struct agnx_hdr);
+	hdr_info->skb = skb;
+	hdr_info->type = HEADER;
+	fill_agnx_hdr(priv, hdr_info);
+	hdr_info->mapping = pci_map_single(priv->pdev, skb->data,
+					   hdr_info->dma_len, PCI_DMA_TODEVICE);
+	do {
+		u32 frag = 0;
+		frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 1);
+		frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 0);
+		frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
+		frag |= agnx_set_bits(FIRST_FRAG_LEN, FIRST_FRAG_LEN_SHIFT, 1);
+		frag |= agnx_set_bits(OWNER, OWNER_SHIFT, 1);
+		hdr_desc->frag = cpu_to_be32(frag);
+	} while (0);
+	hdr_desc->dma_addr = cpu_to_be32(hdr_info->mapping);
+
+
+	/* Set Frag's info and desc */
+	i = (i + 1) % ring->size;
+	frag_desc = ring->desc + i;
+	frag_info = ring->info + i;
+	memcpy(frag_info, hdr_info, sizeof(struct agnx_info));
+	frag_info->type = PACKET;
+	frag_info->dma_len = skb->len - hdr_info->dma_len;
+	frag_info->mapping = pci_map_single(priv->pdev, skb->data + hdr_info->dma_len,
+					    frag_info->dma_len, PCI_DMA_TODEVICE);
+	do {
+		u32 frag = 0;
+		frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 0);
+		frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 1);
+		frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
+		frag |= agnx_set_bits(SUB_FRAG_LEN, SUB_FRAG_LEN_SHIFT, frag_info->dma_len);
+		frag_desc->frag = cpu_to_be32(frag);
+	} while (0);
+	frag_desc->dma_addr = cpu_to_be32(frag_info->mapping);
+
+	txm_power_set(priv, txi);
+
+/* 	do { */
+/* 		int j; */
+/* 		size_t len; */
+/* 		len = skb->len - hdr_info->dma_len + hdr_info->hdr_len;  */
+/* //		if (len == 614) { */
+/* 			agnx_print_desc(hdr_desc); */
+/* 			agnx_print_desc(frag_desc); */
+/* 			agnx_print_tx_hdr((struct agnx_hdr *)skb->data); */
+/* 			agnx_print_sta_power(priv, LOCAL_STAID); */
+/* 			agnx_print_sta(priv, LOCAL_STAID); */
+/* 			for (j = 0; j < 8; j++) */
+/* 				agnx_print_sta_tx_wq(priv, LOCAL_STAID, j); */
+/* 			agnx_print_sta_power(priv, BSSID_STAID); */
+/* 			agnx_print_sta(priv, BSSID_STAID); */
+/* 			for (j = 0; j < 8; j++) */
+/* 				agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
+/* 			//	} */
+/* 	} while (0); */
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* FIXME ugly code */
+	/* Trigger TXM */
+	do {
+		u32 reg;
+		reg = (ioread32(priv->ctl + AGNX_CIR_TXMCTL));
+		reg |= 0x8;
+		iowrite32((reg), priv->ctl + AGNX_CIR_TXMCTL);
+	} while (0);
+
+	/* Trigger TXD */
+	do {
+		u32 reg;
+		reg = (ioread32(priv->ctl + AGNX_CIR_TXDCTL));
+		reg |= 0x8;
+		iowrite32((reg), priv->ctl + AGNX_CIR_TXDCTL);
+	} while (0);
+
+	return 0;
+}
+
+int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb)
+{
+	u16 fctl;
+
+	if (tx_packet_check(skb))
+		return 0;
+
+/* 	print_hex_dump_bytes("agnx: TX_PACKET: ", DUMP_PREFIX_NONE, */
+/* 			     skb->data, skb->len); */
+
+	fctl = le16_to_cpu(*((__le16 *)skb->data));
+
+	if ((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
+		return __agnx_tx(priv, skb, &priv->txd);
+	else
+		return __agnx_tx(priv, skb, &priv->txm);
+}
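
Note that __agnx_tx() above consumes two ring slots per frame, a HEADER descriptor for the agnx_hdr plus a PACKET descriptor for the payload, and that idx/idx_sent free-run and are only reduced modulo the ring size on access. A standalone sketch of the resulting occupancy arithmetic (the commented-out ieee80211_stop_queue() call hints at the intended full check):

#include <stdio.h>

#define RING_SIZE 128	/* AGNX_TXM_RING_SIZE */

/* slots in flight between producer (idx) and reaper (idx_sent);
 * unsigned subtraction stays correct across wrap-around */
static unsigned int ring_used(unsigned int idx, unsigned int idx_sent)
{
	return idx - idx_sent;
}

int main(void)
{
	unsigned int idx = 0, idx_sent = 0, frames = 0;

	while (ring_used(idx, idx_sent) + 2 <= RING_SIZE) {
		idx += 2;	/* one HEADER + one PACKET slot per frame */
		frames++;
	}
	printf("%u frames fit before the queue must stop\n", frames); /* 64 */
	return 0;
}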

+ 250 - 0
drivers/staging/agnx/xmit.h

@@ -0,0 +1,250 @@
+#ifndef AGNX_XMIT_H_
+#define AGNX_XMIT_H_
+
+#include <net/mac80211.h>
+
+struct agnx_priv;
+
+static inline u32 agnx_set_bits(u32 mask, u8 shift, u32 value)
+{
+	return (value << shift) & mask;
+}
+
+static inline u32 agnx_get_bits(u32 mask, u8 shift, u32 value)
+{
+	return (value & mask) >> shift;
+}
+
+
+struct agnx_rx {
+	__be16 rx_packet_duration; /*  RX Packet Duration */
+	__be16 replay_cnt;	/* Replay Count */
+} __attribute__((__packed__));
+
+
+struct agnx_tx {
+	u8 long_retry_limit; /* Long Retry Limit */
+	u8 short_retry_limit; /* Short Retry Limit */
+	u8 long_retry_cnt;	/* Long Retry Count */
+	u8 short_retry_cnt; /* Short Retry Count */
+} __attribute__((__packed__));
+
+
+/* Copy from bcm43xx */
+#define P4D_BYT3S(magic, nr_bytes)      u8 __p4dding##magic[nr_bytes]
+#define P4D_BYTES(line, nr_bytes)       P4D_BYT3S(line, nr_bytes)
+#define PAD_BYTES(nr_bytes)             P4D_BYTES(__LINE__, nr_bytes)
+
+#define P4D_BIT3S(magic, nr_bits)       __be32 __padding##magic:nr_bits
+#define P4D_BITS(line, nr_bits)         P4D_BIT3S(line, nr_bits)
+#define PAD_BITS(nr_bits)	        P4D_BITS(__LINE__, nr_bits)
+
+
+struct agnx_hdr {
+	__be32 reg0;
+#define RTS		        0x80000000 /* RTS */
+#define RTS_SHIFT		31
+#define MULTICAST	        0x40000000 /* multicast */
+#define MULTICAST_SHIFT		30
+#define ACK			0x30000000 /* ACK */
+#define ACK_SHIFT		28
+#define TM			0x08000000 /* TM */
+#define TM_SHIFT		27
+#define RELAY			0x04000000 /* Relay */
+#define RELAY_SHIFT		26
+/* 	PAD_BITS(4); */
+#define REVISED_FCS		0x00380000 /* revised FCS */
+#define REVISED_FCS_SHIFT	19
+#define NEXT_BUFFER_ADDR	0x0007FFFF /* Next Buffer Address */
+#define NEXT_BUFFER_ADDR_SHIFT	0
+
+	__be32 reg1;
+#define MAC_HDR_LEN		0xFC000000 /* MAC Header Length  */
+#define MAC_HDR_LEN_SHIFT	26
+#define DURATION_OVERIDE	0x02000000 /* Duration Override */
+#define DURATION_OVERIDE_SHIFT	25
+#define PHY_HDR_OVERIDE		0x01000000 /* PHY Header Override */
+#define PHY_HDR_OVERIDE_SHIFT	24
+#define CRC_FAIL		0x00800000 /* CRC fail */
+#define CRC_FAIL_SHIFT		23
+/*	PAD_BITS(1); */
+#define SEQUENCE_NUMBER		0x00200000 /* Sequence Number */
+#define SEQUENCE_NUMBER_SHIFT	21
+/*	PAD_BITS(2); */
+#define BUFF_HEAD_ADDR		0x0007FFFF /* Buffer Head Address */
+#define BUFF_HEAD_ADDR_SHIFT	0
+
+	__be32 reg2;
+#define PDU_COUNT		0xFC000000 /* PDU Count */
+#define PDU_COUNT_SHIFT		26
+/* 	PAD_BITS(3); */
+#define WEP_KEY			0x00600000 /* WEP Key # */
+#define WEP_KEY_SHIFT		21
+#define USES_WEP_KEY		0x00100000 /* Uses WEP Key */
+#define USES_WEP_KEY_SHIFT	20
+#define KEEP_ALIVE		0x00080000 /* Keep alive */
+#define KEEP_ALIVE_SHIFT	19
+#define BUFF_TAIL_ADDR		0x0007FFFF /* Buffer Tail Address */
+#define BUFF_TAIL_ADDR_SHIFT	0
+
+	__be32 reg3;
+#define CTS_11G			0x80000000	/* CTS in 11g */
+#define CTS_11G_SHIFT		31
+#define RTS_11G			0x40000000	/* RTS in 11g */
+#define RTS_11G_SHIFT		30
+/* PAD_BITS(2); */
+#define FRAG_SIZE		0x0FFF0000	/* fragment size */
+#define FRAG_SIZE_SHIFT		16
+#define PAYLOAD_LEN		0x0000FFF0	/* payload length */
+#define PAYLOAD_LEN_SHIFT	4
+#define FRAG_NUM		0x0000000F	/* number of frags */
+#define FRAG_NUM_SHIFT		0
+
+	__be32 reg4;
+/* 	PAD_BITS(4); */
+#define RELAY_STAID		0x0FFF0000 /* relayStald */
+#define RELAY_STAID_SHIFT	16
+#define STATION_ID		0x0000FFF0 /* Station ID */
+#define STATION_ID_SHIFT	4
+#define WORKQUEUE_ID		0x0000000F /* Workqueue ID */
+#define WORKQUEUE_ID_SHIFT	0
+
+	/* FIXME this register maybe is LE? */
+	__be32 reg5;
+/* 	PAD_BITS(4); */
+#define ROUTE_HOST		0x0F000000
+#define ROUTE_HOST_SHIFT	24
+#define ROUTE_CARD_CPU		0x00F00000
+#define ROUTE_CARD_CPU_SHIFT	20
+#define ROUTE_ENCRYPTION	0x000F0000
+#define ROUTE_ENCRYPTION_SHIFT	16
+#define ROUTE_TX		0x0000F000
+#define ROUTE_TX_SHIFT		12
+#define ROUTE_RX1		0x00000F00
+#define ROUTE_RX1_SHIFT		8
+#define ROUTE_RX2		0x000000F0
+#define ROUTE_RX2_SHIFT		4
+#define ROUTE_COMPRESSION	0x0000000F
+#define ROUTE_COMPRESSION_SHIFT 0
+
+	__be32 _11g0;			/* 11g */
+	__be32 _11g1;			/* 11g */
+	__be32 _11b0;			/* 11b */
+	__be32 _11b1;			/* 11b */
+	u8 mac_hdr[32];			/* MAC header */
+
+	__be16 rts_duration;		/* RTS duration */
+	__be16 last_duration;		/* Last duration */
+	__be16 sec_last_duration;	/* Second to Last duration */
+	__be16 other_duration;		/* Other duration */
+	__be16 tx_last_duration;	/* TX Last duration */
+	__be16 tx_other_duration;	/* TX Other Duration */
+	__be16 last_11g_len;		/* Length of last 11g */
+	__be16 other_11g_len;		/* Length of other 11g */
+
+	__be16 last_11b_len;		/* Length of last 11b */
+	__be16 other_11b_len;		/* Length of other 11b */
+
+
+	__be16 reg6;
+#define MBF			0xF000 /* mbf */
+#define MBF_SHIFT		12
+#define RSVD4			0x0FFF /* rsvd4 */
+#define RSVD4_SHIFT		0
+
+	__be16 rx_frag_stat;	/* RX fragmentation status */
+
+	__be32 time_stamp;	/* TimeStamp */
+	__be32 phy_stats_hi;	/* PHY stats hi */
+	__be32 phy_stats_lo;	/* PHY stats lo */
+	__be32 mic_key0;	/* MIC key 0 */
+	__be32 mic_key1;	/* MIC key 1 */
+
+	union {			/* RX/TX Union */
+		struct agnx_rx rx;
+		struct agnx_tx tx;
+	};
+
+	u8 rx_channel;		/* Receive Channel */
+	PAD_BYTES(3);
+
+	u8 reserved[4];
+} __attribute__((__packed__));
+
+
+struct agnx_desc {
+#define PACKET_LEN		0xFFF00000
+#define PACKET_LEN_SHIFT	20
+/* ------------------------------------------------ */
+#define FIRST_PACKET_MASK	0x00080000
+#define FIRST_PACKET_MASK_SHIFT	19
+#define FIRST_RESERV2		0x00040000
+#define FIRST_RESERV2_SHIFT	18
+#define FIRST_TKIP_ERROR	0x00020000
+#define FIRST_TKIP_ERROR_SHIFT	17
+#define FIRST_TKIP_PACKET	0x00010000
+#define FIRST_TKIP_PACKET_SHIFT	16
+#define FIRST_RESERV1		0x0000F000
+#define FIRST_RESERV1_SHIFT	12
+#define FIRST_FRAG_LEN		0x00000FF8
+#define FIRST_FRAG_LEN_SHIFT	3
+/* ------------------------------------------------ */
+#define SUB_RESERV2		0x000c0000
+#define SUB_RESERV2_SHIFT	18
+#define SUB_TKIP_ERROR		0x00020000
+#define SUB_TKIP_ERROR_SHIFT	17
+#define SUB_TKIP_PACKET		0x00010000
+#define SUB_TKIP_PACKET_SHIFT	16
+#define SUB_RESERV1		0x00008000
+#define SUB_RESERV1_SHIFT	15
+#define SUB_FRAG_LEN		0x00007FF8
+#define SUB_FRAG_LEN_SHIFT	3
+/* ------------------------------------------------ */
+#define FIRST_FRAG		0x00000004
+#define FIRST_FRAG_SHIFT	2
+#define LAST_FRAG		0x00000002
+#define LAST_FRAG_SHIFT		1
+#define OWNER			0x00000001
+#define OWNER_SHIFT		0
+	__be32 frag;
+	__be32 dma_addr;
+} __attribute__((__packed__));
+
+enum {HEADER, PACKET};
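+/* Each TX frame occupies two descriptors: a HEADER descriptor for the
+ * agnx_hdr and a PACKET descriptor for the payload; see __agnx_tx(). */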
+
+struct agnx_info {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	u32 dma_len;		/* dma buffer len */
+	/* Below fields are only useful for tx */
+	u32 hdr_len;		/* ieee80211 header length */
+	unsigned int type;
+	struct ieee80211_tx_info *txi;
+	struct ieee80211_hdr hdr;
+};
+
+
+struct agnx_ring {
+	struct agnx_desc *desc;
+	dma_addr_t dma;
+	struct agnx_info *info;
+	/* May this overflow once enough packets have been sent? */
+	unsigned int idx;
+	unsigned int idx_sent;		/* only useful for txd and txm */
+	unsigned int size;
+};
+
+#define AGNX_RX_RING_SIZE	128
+#define AGNX_TXD_RING_SIZE	256
+#define AGNX_TXM_RING_SIZE	128
+
+void disable_rx_interrupt(struct agnx_priv *priv);
+void enable_rx_interrupt(struct agnx_priv *priv);
+int fill_rings(struct agnx_priv *priv);
+void unfill_rings(struct agnx_priv *priv);
+void handle_rx_irq(struct agnx_priv *priv);
+void handle_txd_irq(struct agnx_priv *priv);
+void handle_txm_irq(struct agnx_priv *priv);
+void handle_other_irq(struct agnx_priv *priv);
+int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb);
+#endif /* AGNX_XMIT_H_ */
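
For reference, a standalone sketch of decoding a descriptor frag word with the mask/shift pairs above, mirroring what handle_rx_irq() does after be32_to_cpu(); the sample length is invented:

#include <stdint.h>
#include <stdio.h>

#define PACKET_LEN		0xFFF00000
#define PACKET_LEN_SHIFT	20
#define FIRST_FRAG		0x00000004
#define FIRST_FRAG_SHIFT	2
#define LAST_FRAG		0x00000002
#define LAST_FRAG_SHIFT		1
#define OWNER			0x00000001

/* same operation as agnx_get_bits() above */
static uint32_t get_bits(uint32_t mask, uint8_t shift, uint32_t value)
{
	return (value & mask) >> shift;
}

int main(void)
{
	uint32_t frag = (614u << PACKET_LEN_SHIFT) | FIRST_FRAG | OWNER;

	printf("len=%u first=%u last=%u owner=%u\n",
	       get_bits(PACKET_LEN, PACKET_LEN_SHIFT, frag),
	       get_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, frag),
	       get_bits(LAST_FRAG, LAST_FRAG_SHIFT, frag),
	       frag & OWNER);	/* len=614 first=1 last=0 owner=1 */
	return 0;
}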

+ 10 - 0
drivers/staging/altpciechdma/Kconfig

@@ -0,0 +1,10 @@
+config ALTERA_PCIE_CHDMA
+	tristate "Altera PCI Express Chaining DMA driver"
+	depends on PCI
+	default n
+	---help---
+	  A reference driver that exercises the Chaining DMA logic reference
+	  design generated along the Altera FPGA PCI Express soft or hard core,
+	  only if instantiated using the MegaWizard, not the SOPC builder, of
+	  Quartus 8.1.
+

+ 2 - 0
drivers/staging/altpciechdma/Makefile

@@ -0,0 +1,2 @@
+obj-$(CONFIG_ALTERA_PCIE_CHDMA)	+= altpciechdma.o
+

+ 15 - 0
drivers/staging/altpciechdma/TODO

@@ -0,0 +1,15 @@
+DONE:
+    - functionality similar to logic testbench
+
+TODO:
+	- checkpatch.pl cleanups.
+	- keep state of DMA engines.
+	- keep data structure that keeps state of each transfer.
+	- interrupt handler should iterate over outstanding descriptor tables.
+	- complete userspace cdev to read/write using the DMA engines.
+	- split off the DMA support functions in a module, re-usable by custom
+	  drivers.
+
+Please coordinate work with, and send patches to
+Leon Woestenberg <leon@sidebranch.com>
+

+ 1184 - 0
drivers/staging/altpciechdma/altpciechdma.c

@@ -0,0 +1,1184 @@
+/**
+ * Driver for Altera PCIe core chaining DMA reference design.
+ *
+ * Copyright (C) 2008 Leon Woestenberg  <leon.woestenberg@axon.tv>
+ * Copyright (C) 2008 Nickolas Heppermann  <heppermannwdt@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ * Rationale: This driver exercises the chaining DMA read and write engine
+ * in the reference design. It is meant as a complementary reference
+ * driver that can be used for testing early designs as well as a basis to
+ * write your custom driver.
+ *
+ * Status: Test results from Leon Woestenberg  <leon.woestenberg@axon.tv>:
+ *
+ * Sendero Board w/ Cyclone II EP2C35F672C6N, PX1011A PCIe x1 PHY on a
+ * Dell Precision 370 PC, x86, kernel 2.6.20 from Ubuntu 7.04.
+ *
+ * Sendero Board w/ Cyclone II EP2C35F672C6N, PX1011A PCIe x1 PHY on a
+ * Freescale MPC8313E-RDB board, PowerPC, 2.6.24 w/ Freescale patches.
+ *
+ * Driver tests passed with PCIe Compiler 8.1. With PCIe 8.0 the DMA
+ * loopback test had reproducible compare errors. I assume a change
+ * in the compiler or reference design, but could not find evidence nor
+ * documentation on a change or fix in that direction.
+ *
+ * The reference design does not have readable locations and thus a
+ * dummy read, used to flush PCI posted writes, cannot be performed.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+
+/* by default do not build the character device interface */
+/* XXX It is non-functional yet */
+#ifndef ALTPCIECHDMA_CDEV
+#  define ALTPCIECHDMA_CDEV 0
+#endif
+
+/* build the character device interface? */
+#if ALTPCIECHDMA_CDEV
+#  define MAX_CHDMA_SIZE (8 * 1024 * 1024)
+#  include "mapper_user_to_sg.h"
+#endif
+
+/** driver name, mimics Altera naming of the reference design */
+#define DRV_NAME "altpciechdma"
+/** number of BARs on the device */
+#define APE_BAR_NUM (6)
+/** BAR number where the RCSLAVE memory sits */
+#define APE_BAR_RCSLAVE (0)
+/** BAR number where the Descriptor Header sits */
+#define APE_BAR_HEADER (2)
+
+/** maximum size in bytes of the descriptor table, chdma logic limit */
+#define APE_CHDMA_TABLE_SIZE (4096)
+/* A single transfer must not exceed 255 table entries. Worst case, this can
+ * be achieved by 255 scattered pages, with only a single byte in the head
+ * and tail pages. 253 * PAGE_SIZE is a safe upper bound for the transfer size.
+ */
+#define APE_CHDMA_MAX_TRANSFER_LEN (253 * PAGE_SIZE)
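+/* Worked example (illustrative): with 4 KiB pages this bound is
+ * 253 * 4096 = 1036288 bytes; 253 full pages plus a one-byte head and a
+ * one-byte tail page still fit within the 255-descriptor limit.
+ */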
+
+/**
+ * Specifies those BARs to be mapped and the length of each mapping.
+ *
+ * Zero (0) means do not map, otherwise specifies the BAR lengths to be mapped.
+ * If the actual BAR length is less, this is considered an error; then
+ * reconfigure your PCIe core.
+ *
+ * @see ug_pci_express 8.0, table 7-2 at page 7-13.
+ */
+static const unsigned long bar_min_len[APE_BAR_NUM] =
+	{ 32768, 0, 256, 0, 32768, 0 };
+
+/**
+ * Descriptor Header, controls the DMA read engine or write engine.
+ *
+ * The descriptor header is the main data structure for starting DMA transfers.
+ *
+ * It sits in End Point (FPGA) memory BAR[2] for 32-bit or BAR[3:2] for 64-bit.
+ * It references a descriptor table which exists in Root Complex (PC) memory.
+ * Writing the rclast field starts the DMA operation, thus all other structures
+ * and fields must be setup before doing so.
+ *
+ * @see ug_pci_express 8.0, tables 7-3, 7-4 and 7-5 at page 7-14.
+ * @note This header must be written in four 32-bit (PCI DWORD) writes.
+ */
+struct ape_chdma_header {
+	/**
+	 * w0 consists of two 16-bit fields:
+	 * lsb u16 number; number of descriptors in ape_chdma_table
+	 * msb u16 control; global control flags
+	 */
+	u32 w0;
+	/* bus address to ape_chdma_table in Root Complex memory */
+	u32 bdt_addr_h;
+	u32 bdt_addr_l;
+	/**
+	 * w3 consists of two 16-bit fields:
+	 * - lsb u16 rclast; last descriptor number available in Root Complex
+	 *    - zero (0) means the first descriptor is ready,
+	 *    - one (1) means two descriptors are ready, etc.
+	 * - msb u16 reserved;
+	 *
+	 * @note writing to this memory location starts the DMA operation!
+	 */
+	u32 w3;
+} __attribute__ ((packed));
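+
+/* Illustrative sketch (an assumption, not part of the reference design):
+ * given the w0 layout documented above, a hypothetical helper to compose
+ * the control word could be
+ *
+ *	static inline u32 ape_chdma_w0(u16 number, u16 control)
+ *	{
+ *		return ((u32)control << 16) | number;
+ *	}
+ *
+ * with the result written via iowrite32(), which performs the
+ * little-endian bus conversion, as dma_test() below does.
+ */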
+
+/**
+ * Descriptor Entry, describing a (non-scattered) single memory block transfer.
+ *
+ * There is one descriptor for each memory block involved in the transfer, a
+ * block being a contiguous address range on the bus.
+ *
+ * Multiple descriptors are chained by means of the ape_chdma_table data
+ * structure.
+ *
+ * @see ug_pci_express 8.0, tables 7-6, 7-7 and 7-8 at page 7-14 and page 7-15.
+ */
+struct ape_chdma_desc {
+	/**
+	 * w0 consists of two 16-bit fields:
+	 * number of DWORDS to transfer
+	 * - lsb u16 length;
+	 * global control
+	 * - msb u16 control;
+	 */
+	u32 w0;
+	/* address of memory in the End Point */
+	u32 ep_addr;
+	/* bus address of source or destination memory in the Root Complex */
+	u32 rc_addr_h;
+	u32 rc_addr_l;
+} __attribute__ ((packed));
+
+/**
+ * Descriptor Table, an array of descriptors describing a chained transfer.
+ *
+ * An array of descriptors, preceded by workspace for the End Point.
+ * It exists in Root Complex memory.
+ *
+ * The End Point can update its last completed descriptor number in the
+ * eplast field if requested by setting the EPLAST_ENA bit either
+ * globally in the header's or locally in any descriptor's control field.
+ *
+ * @note this structure may not exceed 4096 bytes. This results in a
+ * maximum of 4096 / (4 * 4) - 1 = 255 descriptors per chained transfer.
+ *
+ * @see ug_pci_express 8.0, tables 7-9, 7-10 and 7-11 at page 7-17 and page 7-18.
+ */
+struct ape_chdma_table {
+	/* workspace 0x00-0x0b, reserved */
+	u32 reserved1[3];
+	/* workspace 0x0c-0x0f, last descriptor handled by End Point */
+	u32 w3;
+	/* the actual array of descriptors
+    * 0x10-0x1f, 0x20-0x2f, ... 0xff0-0xfff (255 entries)
+    */
+	struct ape_chdma_desc desc[255];
+} __attribute__ ((packed));
+
+/**
+ * Altera PCI Express ('ape') board-specific bookkeeping data
+ *
+ * Keeps state of the PCIe core and the Chaining DMA controller
+ * application.
+ */
+struct ape_dev {
+	/** the kernel pci device data structure provided by probe() */
+	struct pci_dev *pci_dev;
+	/**
+	 * kernel virtual address of the mapped BAR memory and IO regions of
+	 * the End Point. Used by map_bars()/unmap_bars().
+	 */
+	void __iomem *bar[APE_BAR_NUM];
+	/** kernel virtual address for Descriptor Table in Root Complex memory */
+	struct ape_chdma_table *table_virt;
+	/**
+	 * bus address for the Descriptor Table in Root Complex memory, in
+	 * CPU-native endianness
+	 */
+	dma_addr_t table_bus;
+	/* if the device regions could not be allocated, assume and remember it
+	 * is in use by another driver; this driver must not disable the device.
+	 */
+	int in_use;
+	/* whether this driver enabled msi for the device */
+	int msi_enabled;
+	/* whether this driver could obtain the regions */
+	int got_regions;
+	/* irq line successfully requested by this driver, -1 otherwise */
+	int irq_line;
+	/* board revision */
+	u8 revision;
+	/* interrupt count, incremented by the interrupt handler */
+	int irq_count;
+#if ALTPCIECHDMA_CDEV
+	/* character device */
+	dev_t cdevno;
+	struct cdev cdev;
+	/* user space scatter gather mapper */
+	struct sg_mapping_t *sgm;
+#endif
+};
+
+/**
+ * Using the subsystem vendor id and subsystem id, it is possible to
+ * distinguish between different cards based around the same
+ * (third-party) logic core.
+ *
+ * The default Altera vendor and device IDs are used here, along with
+ * some (non-reserved) IDs in use amongst the testers/developers.
+ */
+static const struct pci_device_id ids[] = {
+	{ PCI_DEVICE(0x1172, 0xE001), },
+	{ PCI_DEVICE(0x2071, 0x2071), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+#if ALTPCIECHDMA_CDEV
+/* prototypes for character device */
+static int sg_init(struct ape_dev *ape);
+static void sg_exit(struct ape_dev *ape);
+#endif
+
+/**
+ * altpciechdma_isr() - Interrupt handler
+ *
+ */
+static irqreturn_t altpciechdma_isr(int irq, void *dev_id)
+{
+	struct ape_dev *ape = (struct ape_dev *)dev_id;
+	if (!ape)
+		return IRQ_NONE;
+	ape->irq_count++;
+	return IRQ_HANDLED;
+}
+
+static int __devinit scan_bars(struct ape_dev *ape, struct pci_dev *dev)
+{
+	int i;
+	for (i = 0; i < APE_BAR_NUM; i++) {
+		unsigned long bar_start = pci_resource_start(dev, i);
+		if (bar_start) {
+			unsigned long bar_end = pci_resource_end(dev, i);
+			unsigned long bar_flags = pci_resource_flags(dev, i);
+			printk(KERN_DEBUG "BAR%d 0x%08lx-0x%08lx flags 0x%08lx\n",
+			  i, bar_start, bar_end, bar_flags);
+		}
+	}
+	return 0;
+}
+
+/**
+ * Unmap the BAR regions that had been mapped earlier using map_bars()
+ */
+static void unmap_bars(struct ape_dev *ape, struct pci_dev *dev)
+{
+	int i;
+	for (i = 0; i < APE_BAR_NUM; i++) {
+		/* is this BAR mapped? */
+		if (ape->bar[i]) {
+			/* unmap BAR */
+			pci_iounmap(dev, ape->bar[i]);
+			ape->bar[i] = NULL;
+		}
+	}
+}
+
+/**
+ * Map the device memory regions into kernel virtual address space after
+ * verifying their sizes respect the minimum sizes needed, given by the
+ * bar_min_len[] array.
+ */
+static int __devinit map_bars(struct ape_dev *ape, struct pci_dev *dev)
+{
+	int rc;
+	int i;
+	/* iterate through all the BARs */
+	for (i = 0; i < APE_BAR_NUM; i++) {
+		unsigned long bar_start = pci_resource_start(dev, i);
+		unsigned long bar_end = pci_resource_end(dev, i);
+		unsigned long bar_length = bar_end - bar_start + 1;
+		ape->bar[i] = NULL;
+		/* do not map, and skip, BARs with length 0 */
+		if (!bar_min_len[i])
+			continue;
+		/* do not map BARs with address 0 */
+		if (!bar_start || !bar_end) {
+            printk(KERN_DEBUG "BAR #%d is not present?!\n", i);
+			rc = -1;
+			goto fail;
+		}
+		bar_length = bar_end - bar_start + 1;
+		/* BAR length is less than driver requires? */
+		if (bar_length < bar_min_len[i]) {
+            printk(KERN_DEBUG "BAR #%d length = %lu bytes but driver "
+            "requires at least %lu bytes\n", i, bar_length, bar_min_len[i]);
+			rc = -1;
+			goto fail;
+		}
+		/* map the device memory or IO region into kernel virtual
+		 * address space */
+		ape->bar[i] = pci_iomap(dev, i, bar_min_len[i]);
+		if (!ape->bar[i]) {
+			printk(KERN_DEBUG "Could not map BAR #%d.\n", i);
+			rc = -1;
+			goto fail;
+		}
+        printk(KERN_DEBUG "BAR[%d] mapped at 0x%p with length %lu(/%lu).\n", i,
+			ape->bar[i], bar_min_len[i], bar_length);
+	}
+	/* successfully mapped all required BAR regions */
+	rc = 0;
+	goto success;
+fail:
+	/* unmap any BARs that we did map */
+	unmap_bars(ape, dev);
+success:
+	return rc;
+}
+
+#if 0 /* not yet implemented fully FIXME add opcode */
+static void __devinit rcslave_test(struct ape_dev *ape, struct pci_dev *dev)
+{
+	u32 *rcslave_mem = (u32 *)ape->bar[APE_BAR_RCSLAVE];
+	u32 result = 0;
+	/** this number is assumed to be different each time this test runs */
+	u32 seed = (u32)jiffies;
+	u32 value = seed;
+	int i;
+
+	/* write loop */
+	value = seed;
+	for (i = 1024; i < 32768 / 4 ; i++) {
+		printk(KERN_DEBUG "Writing 0x%08x to 0x%p.\n",
+			(u32)value, (void *)rcslave_mem + i);
+		iowrite32(value, rcslave_mem + i);
+		value++;
+	}
+	/* read-back loop */
+	value = seed;
+	for (i = 1024; i < 32768 / 4; i++) {
+		result = ioread32(rcslave_mem + i);
+		if (result != value) {
+			printk(KERN_DEBUG "Wrote 0x%08x to 0x%p, but read back 0x%08x.\n",
+				(u32)value, (void *)rcslave_mem + i, (u32)result);
+			break;
+		}
+		value++;
+	}
+}
+#endif
+
+/* obtain the 32 most significant (high) bits of a 32-bit or 64-bit address */
+#define pci_dma_h(addr) ((addr >> 16) >> 16)
+/* obtain the 32 least significant (low) bits of a 32-bit or 64-bit address */
+#define pci_dma_l(addr) (addr & 0xffffffffUL)
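+/* Usage sketch (mirrors what dma_test() below does): split a dma_addr_t
+ * into the two 32-bit header fields:
+ *
+ *	iowrite32(pci_dma_h(ape->table_bus), &header->bdt_addr_h);
+ *	iowrite32(pci_dma_l(ape->table_bus), &header->bdt_addr_l);
+ */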
+
+/* ape_chdma_desc_set() - Fill an Altera PCI Express Chaining DMA descriptor
+ *
+ * @desc pointer to descriptor to be filled
+ * @addr root complex address
+ * @ep_addr end point address
+ * @len number of bytes, must be a multiple of 4.
+ */
+static inline void ape_chdma_desc_set(struct ape_chdma_desc *desc, dma_addr_t addr, u32 ep_addr, int len)
+{
+	BUG_ON(len & 3);
+	desc->w0 = cpu_to_le32(len / 4);
+	desc->ep_addr = cpu_to_le32(ep_addr);
+	desc->rc_addr_h = cpu_to_le32(pci_dma_h(addr));
+	desc->rc_addr_l = cpu_to_le32(pci_dma_l(addr));
+}
+
+/*
+ * ape_sg_to_chdma_table() - Create a device descriptor table from a scatterlist.
+ *
+ * The scatterlist must have been mapped by pci_map_sg(sgm->sgl).
+ *
+ * @sgl scatterlist.
+ * @nents Number of entries in the scatterlist.
+ * @first Start index in the scatterlist sgm->sgl.
+ * @ep_addr End Point address for the scatter/gather transfer.
+ * @desc pointer to first descriptor
+ *
+ * Returns Number of entries in the table on success, -1 on error.
+ */
+static int ape_sg_to_chdma_table(struct scatterlist *sgl, int nents, int first, struct ape_chdma_desc *desc, u32 ep_addr)
+{
+	int i = first, j = 0;
+	/* inspect first entry */
+	dma_addr_t addr = sg_dma_address(&sgl[i]);
+	unsigned int len = sg_dma_len(&sgl[i]);
+	/* contiguous block */
+	dma_addr_t cont_addr = addr;
+	unsigned int cont_len = len;
+	/* iterate over remaining entries */
+	for (; j < 255 && i < nents - 1; i++) {
+		/* bus address of next entry i + 1 */
+		dma_addr_t next = sg_dma_address(&sgl[i + 1]);
+		/* length of this entry i */
+		len = sg_dma_len(&sgl[i]);
+		printk(KERN_DEBUG "%04d: addr=0x%08x length=0x%08x\n", i, addr, len);
+		/* entry i + 1 is non-contiguous with entry i? */
+		if (next != addr + len) {
+			/* TODO create entry here (we could overwrite i) */
+			printk(KERN_DEBUG "%4d: cont_addr=0x%08x cont_len=0x%08x\n", j, cont_addr, cont_len);
+			/* set descriptor for contiguous transfer */
+			ape_chdma_desc_set(&desc[j], cont_addr, ep_addr, cont_len);
+			/* next end point memory address */
+			ep_addr += cont_len;
+			/* start new contiguous block */
+			cont_addr = next;
+			cont_len = 0;
+			j++;
+		}
+		/* add entry i + 1 to current contiguous block */
+		cont_len += len;
+		/* goto entry i + 1 */
+		addr = next;
+	}
+	/* TODO create entry here  (we could overwrite i) */
+	printk(KERN_DEBUG "%04d: addr=0x%08x length=0x%08x\n", i, addr, len);
+	printk(KERN_DEBUG "%4d: cont_addr=0x%08x length=0x%08x\n", j, cont_addr, cont_len);
+	j++;
+	return j;
+}
+
+/* compare buffers */
+static inline int compare(u32 *p, u32 *q, int len)
+{
+	int result = -1;
+	int fail = 0;
+	int i;
+	for (i = 0; i < len / 4; i++) {
+		if (*p == *q) {
+			/* every so many u32 words, show equals */
+			if ((i & 255) == 0)
+				printk(KERN_DEBUG "[%p] = 0x%08x    [%p] = 0x%08x\n", p, *p, q, *q);
+		} else {
+			fail++;
+			/* show the first few miscompares */
+			if (fail < 10) {
+                printk(KERN_DEBUG "[%p] = 0x%08x != [%p] = 0x%08x ?!\n", p, *p, q, *q);
+            /* but stop after a while */
+            } else if (fail == 10) {
+                printk(KERN_DEBUG "---more errors follow! not printed---\n");
+		  	} else {
+				/* stop compare after this many errors */
+                break;
+            }
+		}
+		p++;
+		q++;
+	}
+	if (!fail)
+		result = 0;
+	return result;
+}
+
+/* dma_test() - Perform DMA loop back test to end point and back to root complex.
+ *
+ * Allocate a cache-coherent buffer in host memory, consisting of four pages.
+ *
+ * Fill the four memory pages such that each 32-bit word contains its own address.
+ *
+ * Now perform a loop back test, have the end point device copy the first buffer
+ * half to end point memory, then have it copy back into the second half.
+ *
+ *   Create a descriptor table to copy the first buffer half into End Point
+ *   memory. Instruct the End Point to do a DMA read using that table.
+ *
+ *   Create a descriptor table to copy End Point memory to the second buffer
+ *   half. Instruct the End Point to do a DMA write using that table.
+ *
+ * Compare results, fail or pass.
+ *
+ */
+static int __devinit dma_test(struct ape_dev *ape, struct pci_dev *dev)
+{
+	/* test result; guilty until proven innocent */
+	int result = -1;
+	/* the DMA write header sits at address 0x00 of the DMA engine BAR */
+	struct ape_chdma_header *write_header = (struct ape_chdma_header *)ape->bar[APE_BAR_HEADER];
+	/* the DMA read header sits after the write header at address 0x10 */
+	struct ape_chdma_header *read_header = write_header + 1;
+	/* virtual address of the allocated buffer */
+	u8 *buffer_virt = 0;
+	/* bus address of the allocated buffer */
+	dma_addr_t buffer_bus = 0;
+	int i, n = 0, irq_count;
+
+	/* temporary value used to construct 32-bit data words */
+	u32 w;
+
+	printk(KERN_DEBUG "bar_tests(), PAGE_SIZE = 0x%0x\n", (int)PAGE_SIZE);
+	printk(KERN_DEBUG "write_header = 0x%p.\n", write_header);
+	printk(KERN_DEBUG "read_header = 0x%p.\n", read_header);
+	printk(KERN_DEBUG "&write_header->w3 = 0x%p\n", &write_header->w3);
+	printk(KERN_DEBUG "&read_header->w3 = 0x%p\n", &read_header->w3);
+	printk(KERN_DEBUG "ape->table_virt = 0x%p.\n", ape->table_virt);
+
+	if (!write_header || !read_header || !ape->table_virt)
+		goto fail;
+
+	/* allocate and map coherently-cached memory for a DMA-able buffer */
+	/* @see 2.6.26.2/Documentation/DMA-mapping.txt line 318 */
+	buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus);
+	if (!buffer_virt) {
+		printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n");
+		goto fail;
+	}
+	printk(KERN_DEBUG "Allocated cache-coherent DMA buffer (virtual address = 0x%016llx, bus address = 0x%016llx).\n",
+		(u64)buffer_virt, (u64)buffer_bus);
+
+	/* fill first half of buffer with its virtual address as data */
+	for (i = 0; i < 4 * PAGE_SIZE; i += 4)
+#if 0
+		*(u32 *)(buffer_virt + i) = i / PAGE_SIZE + 1;
+#else
+		*(u32 *)(buffer_virt + i) = (u32)(unsigned long)(buffer_virt + i);
+#endif
+#if 0
+	compare((u32 *)buffer_virt, (u32 *)(buffer_virt + 2 * PAGE_SIZE), 8192);
+#endif
+
+#if 0
+	/* fill second half of buffer with zeroes */
+	for (i = 2 * PAGE_SIZE; i < 4 * PAGE_SIZE; i += 4)
+		*(u32 *)(buffer_virt + i) = 0;
+#endif
+
+	/* invalidate EPLAST, outside 0-255, 0xFADE is from the testbench */
+	ape->table_virt->w3 = cpu_to_le32(0x0000FADE);
+
+	/* fill in first descriptor */
+	n = 0;
+	/* read 8192 bytes from RC buffer to EP address 4096 */
+	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus, 4096, 2 * PAGE_SIZE);
+#if 1
+	for (i = 0; i < 255; i++) {
+		ape_chdma_desc_set(&ape->table_virt->desc[i], buffer_bus, 4096, 2 * PAGE_SIZE);
+	}
+	/* index of last descriptor */
+	n = i - 1;
+#endif
+#if 0
+	/* fill in next descriptor */
+	n++;
+	/* read 1024 bytes from RC buffer to EP address 4096 + 1024 */
+	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus + 1024, 4096 + 1024, 1024);
+#endif
+
+#if 1
+	/* enable MSI after the last descriptor is completed */
+	if (ape->msi_enabled)
+		ape->table_virt->desc[n].w0 |= cpu_to_le32(1UL << 16)/*local MSI*/;
+#endif
+#if 0
+	/* dump descriptor table for debugging */
+	printk(KERN_DEBUG "Descriptor Table (Read, in Root Complex Memory, # = %d)\n", n + 1);
+	for (i = 0; i < 4 + (n + 1) * 4; i += 4) {
+		u32 *p = (u32 *)ape->table_virt;
+		p += i;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (LEN=0x%x)\n", (u32)p, (u32)p & 15, *p, 4 * le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (EPA=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCH=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCL=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+	}
+#endif
+	/* set available number of descriptors in table */
+	w = (u32)(n + 1);
+	w |= (1UL << 18)/*global EPLAST_EN*/;
+#if 0
+	if (ape->msi_enabled)
+		w |= (1UL << 17)/*global MSI*/;
+#endif
+	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", w, (void *)&read_header->w0);
+	iowrite32(w, &read_header->w0);
+
+	/* write table address (higher 32-bits) */
+	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)((ape->table_bus >> 16) >> 16), (void *)&read_header->bdt_addr_h);
+	iowrite32(pci_dma_h(ape->table_bus), &read_header->bdt_addr_h);
+
+	/* write table address (lower 32-bits) */
+	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)(ape->table_bus & 0xffffffffUL), (void *)&read_header->bdt_addr_l);
+	iowrite32(pci_dma_l(ape->table_bus), &read_header->bdt_addr_l);
+
+	/* memory write barrier */
+	wmb();
+	printk(KERN_DEBUG "Flush posted writes\n");
+	/** FIXME Add dummy read to flush posted writes but need a readable location! */
+#if 0
+	(void)ioread32();
+#endif
+
+	/* remember IRQ count before the transfer */
+	irq_count = ape->irq_count;
+	/* write number of descriptors - this starts the DMA */
+	printk(KERN_DEBUG "\nStart DMA read\n");
+	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)n, (void *)&read_header->w3);
+	iowrite32(n, &read_header->w3);
+	printk(KERN_DEBUG "EPLAST = %lu\n", le32_to_cpu(*(u32 *)&ape->table_virt->w3) & 0xffffUL);
+
+	/** memory write barrier */
+	wmb();
+	/* dummy read to flush posted writes */
+	/* FIXME Need a readable location! */
+#if 0
+	(void)ioread32();
+#endif
+	printk(KERN_DEBUG "POLL FOR READ:\n");
+	/* poll for chain completion, up to 100 times 100 microseconds */
+	for (i = 0; i < 100; i++) {
+		volatile u32 *p = &ape->table_virt->w3;
+		u32 eplast = le32_to_cpu(*p) & 0xffffUL;
+		printk(KERN_DEBUG "EPLAST = %u, n = %d\n", eplast, n);
+		if (eplast == n) {
+			printk(KERN_DEBUG "DONE\n");
+			/* print the number of IRQs raised during the transfer */
+			printk(KERN_DEBUG "#IRQs during transfer: %d\n", ape->irq_count - irq_count);
+			break;
+		}
+		udelay(100);
+	}
+
+	/* invalidate EPLAST, outside 0-255, 0xFADE is from the testbench */
+	ape->table_virt->w3 = cpu_to_le32(0x0000FADE);
+
+	/* setup first descriptor */
+	n = 0;
+	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus + 8192, 4096, 2 * PAGE_SIZE);
+#if 1
+	for (i = 0; i < 255; i++) {
+		ape_chdma_desc_set(&ape->table_virt->desc[i], buffer_bus + 8192, 4096, 2 * PAGE_SIZE);
+	}
+	/* index of last descriptor */
+	n = i - 1;
+#endif
+#if 1 /* test variable, make a module option later */
+	if (ape->msi_enabled)
+		ape->table_virt->desc[n].w0 |= cpu_to_le32(1UL << 16)/*local MSI*/;
+#endif
+#if 0
+	/* dump descriptor table for debugging */
+	printk(KERN_DEBUG "Descriptor Table (Write, in Root Complex Memory, # = %d)\n", n + 1);
+	for (i = 0; i < 4 + (n + 1) * 4; i += 4) {
+		u32 *p = (u32 *)ape->table_virt;
+		p += i;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (LEN=0x%x)\n", (u32)p, (u32)p & 15, *p, 4 * le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (EPA=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCH=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+		p++;
+		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCL=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
+	}
+#endif
+
+	/* set number of available descriptors in the table */
+	w = (u32)(n + 1);
+	/* enable updates of eplast for each descriptor completion */
+	w |= (u32)(1UL << 18)/*global EPLAST_EN*/;
+#if 0 /* test variable, make a module option later */
+	/* enable MSI for each descriptor completion */
+	if (ape->msi_enabled)
+		w |= (1UL << 17)/*global MSI*/;
+#endif
+	iowrite32(w, &write_header->w0);
+	iowrite32(pci_dma_h(ape->table_bus), &write_header->bdt_addr_h);
+	iowrite32(pci_dma_l(ape->table_bus), &write_header->bdt_addr_l);
+
+	/** memory write barrier and flush posted writes */
+	wmb();
+	/* dummy read to flush posted writes */
+	/* FIXME Need a readable location! */
+#if 0
+	(void)ioread32();
+#endif
+	irq_count = ape->irq_count;
+
+	printk(KERN_DEBUG "\nStart DMA write\n");
+	iowrite32(n, &write_header->w3);
+
+	/** memory write barrier */
+	wmb();
+	/* dummy read to flush posted writes */
+	/* FIXME Need a readable location! */
+#if 0
+	(void)ioread32();
+#endif
+
+	printk(KERN_DEBUG "POLL FOR WRITE:\n");
+	/* poll for completion, up to 100 times 100 microseconds */
+	for (i = 0; i < 100; i++) {
+		volatile u32 *p = &ape->table_virt->w3;
+		u32 eplast = le32_to_cpu(*p) & 0xffffUL;
+		printk(KERN_DEBUG "EPLAST = %u, n = %d\n", eplast, n);
+		if (eplast == n) {
+			printk(KERN_DEBUG "DONE\n");
+			/* print the number of IRQs raised during the transfer */
+			printk(KERN_DEBUG "#IRQs during transfer: %d\n", ape->irq_count - irq_count);
+			break;
+		}
+		udelay(100);
+	}
+	/* soft-reset DMA write engine */
+	iowrite32(0x0000ffffUL, &write_header->w0);
+	/* soft-reset DMA read engine */
+	iowrite32(0x0000ffffUL, &read_header->w0);
+
+	/** memory write barrier */
+	wmb();
+	/* dummy read to flush posted writes */
+	/* FIXME Need a readable location! */
+#if 0
+	(void)ioread32();
+#endif
+	/* compare first half of buffer with second half, should be identical */
+	result = compare((u32 *)buffer_virt, (u32 *)(buffer_virt + 2 * PAGE_SIZE), 8192);
+	printk(KERN_DEBUG "DMA loop back test %s.\n", result ? "FAILED" : "PASSED");
+
+	pci_free_consistent(dev, 4 * PAGE_SIZE, buffer_virt, buffer_bus);
+fail:
+	printk(KERN_DEBUG "bar_tests() end, result %d\n", result);
+	return result;
+}
+
+/* Called when the PCI sub system thinks we can control the given device.
+ * Inspect if we can support the device and if so take control of it.
+ *
+ * Return 0 when we have taken control of the given device.
+ *
+ * - allocate board specific bookkeeping
+ * - allocate coherently-mapped memory for the descriptor table
+ * - enable the board
+ * - verify board revision
+ * - request regions
+ * - query DMA mask
+ * - obtain and request irq
+ * - map regions into kernel address space
+ */
+static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int rc = 0;
+	struct ape_dev *ape = NULL;
+	u8 irq_pin, irq_line;
+	printk(KERN_DEBUG "probe(dev = 0x%p, pciid = 0x%p)\n", dev, id);
+
+	/* allocate memory for per-board book keeping */
+	ape = kzalloc(sizeof(struct ape_dev), GFP_KERNEL);
+	if (!ape) {
+		printk(KERN_DEBUG "Could not kzalloc()ate memory.\n");
+		goto err_ape;
+	}
+	ape->pci_dev = dev;
+	dev->dev.driver_data = (void *)ape;
+	printk(KERN_DEBUG "probe() ape = 0x%p\n", ape);
+
+	printk(KERN_DEBUG "sizeof(struct ape_chdma_table) = %d.\n",
+		(int)sizeof(struct ape_chdma_table));
+	/* the reference design has a size restriction on the table size */
+	BUG_ON(sizeof(struct ape_chdma_table) > APE_CHDMA_TABLE_SIZE);
+
+	/* allocate and map coherently-cached memory for a descriptor table */
+	/* @see LDD3 page 446 */
+	ape->table_virt = (struct ape_chdma_table *)pci_alloc_consistent(dev,
+		APE_CHDMA_TABLE_SIZE, &ape->table_bus);
+	/* could not allocate table? */
+	if (!ape->table_virt) {
+		printk(KERN_DEBUG "Could not dma_alloc()ate_coherent memory.\n");
+		goto err_table;
+	}
+
+	printk(KERN_DEBUG "table_virt = 0x%16llx, table_bus = 0x%16llx.\n",
+		(u64)ape->table_virt, (u64)ape->table_bus);
+
+	/* enable device */
+	rc = pci_enable_device(dev);
+	if (rc) {
+		printk(KERN_DEBUG "pci_enable_device() failed\n");
+		goto err_enable;
+	}
+
+	/* enable bus master capability on device */
+	pci_set_master(dev);
+	/* enable message signaled interrupts */
+	rc = pci_enable_msi(dev);
+	/* could not use MSI? */
+	if (rc) {
+		/* resort to legacy interrupts */
+		printk(KERN_DEBUG "Could not enable MSI interrupting.\n");
+		ape->msi_enabled = 0;
+	/* MSI enabled, remember for cleanup */
+	} else {
+		printk(KERN_DEBUG "Enabled MSI interrupting.\n");
+		ape->msi_enabled = 1;
+	}
+
+	pci_read_config_byte(dev, PCI_REVISION_ID, &ape->revision);
+#if 0 /* example */
+	/* (for example) this driver does not support revision 0x42 */
+	if (ape->revision == 0x42) {
+		printk(KERN_DEBUG "Revision 0x42 is not supported by this driver.\n");
+		rc = -ENODEV;
+		goto err_rev;
+	}
+#endif
+	/** XXX check for native or legacy PCIe endpoint? */
+
+	rc = pci_request_regions(dev, DRV_NAME);
+	/* could not request all regions? */
+	if (rc) {
+		/* assume device is in use (and do not disable it later!) */
+		ape->in_use = 1;
+		goto err_regions;
+	}
+	ape->got_regions = 1;
+
+#if 1 /* @todo For now, disable 64-bit, because I do not understand the implications (DAC!) */
+	/* query for DMA transfer */
+	/* @see Documentation/DMA-mapping.txt */
+	if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
+		pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
+		/* use 64-bit DMA */
+		printk(KERN_DEBUG "Using a 64-bit DMA mask.\n");
+	} else
+#endif
+	if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
+		printk(KERN_DEBUG "Could not set 64-bit DMA mask.\n");
+		pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK);
+		/* use 32-bit DMA */
+		printk(KERN_DEBUG "Using a 32-bit DMA mask.\n");
+	} else {
+		printk(KERN_DEBUG "No suitable DMA possible.\n");
+		/** @todo Choose proper error return code */
+		rc = -1;
+		goto err_mask;
+	}
+
+	rc = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
+	/* could not read? */
+	if (rc)
+		goto err_irq;
+	printk(KERN_DEBUG "IRQ pin #%d (0=none, 1=INTA#...4=INTD#).\n", irq_pin);
+
+	/* @see LDD3, page 318 */
+	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_line);
+	/* could not read? */
+	if (rc) {
+		printk(KERN_DEBUG "Could not query PCI_INTERRUPT_LINE, error %d\n", rc);
+		goto err_irq;
+	}
+	printk(KERN_DEBUG "IRQ line #%d.\n", irq_line);
+#if 1
+	irq_line = dev->irq;
+	/* @see LDD3, page 259 */
+	rc = request_irq(irq_line, altpciechdma_isr, IRQF_SHARED, DRV_NAME, (void *)ape);
+	if (rc) {
+		printk(KERN_DEBUG "Could not request IRQ #%d, error %d\n", irq_line, rc);
+		ape->irq_line = -1;
+		goto err_irq;
+	}
+	/* remember which irq we allocated */
+	ape->irq_line = (int)irq_line;
+	printk(KERN_DEBUG "Succesfully requested IRQ #%d with dev_id 0x%p\n", irq_line, ape);
+#endif
+	/* show BARs */
+	scan_bars(ape, dev);
+	/* map BARs */
+	rc = map_bars(ape, dev);
+	if (rc)
+		goto err_map;
+#if ALTPCIECHDMA_CDEV
+	/* initialize character device */
+	rc = sg_init(ape);
+	if (rc)
+		goto err_cdev;
+#endif
+	/* perform DMA engines loop back test */
+	rc = dma_test(ape, dev);
+	(void)rc;
+	/* successfully took the device */
+	rc = 0;
+	printk(KERN_DEBUG "probe() successful.\n");
+	goto end;
+err_cdev:
+	/* unmap the BARs */
+	unmap_bars(ape, dev);
+err_map:
+	/* free allocated irq */
+	if (ape->irq_line >= 0)
+		free_irq(ape->irq_line, (void *)ape);
+err_irq:
+	if (ape->msi_enabled)
+		pci_disable_msi(dev);
+	/* disable the device iff it is not in use */
+	if (!ape->in_use)
+		pci_disable_device(dev);
+	if (ape->got_regions)
+		pci_release_regions(dev);
+err_mask:
+err_regions:
+err_rev:
+/* clean up everything before device enable() */
+err_enable:
+	if (ape->table_virt)
+		pci_free_consistent(dev, APE_CHDMA_TABLE_SIZE, ape->table_virt, ape->table_bus);
+/* clean up everything before allocating descriptor table */
+err_table:
+	if (ape)
+		kfree(ape);
+err_ape:
+end:
+	return rc;
+}
+
+static void __devexit remove(struct pci_dev *dev)
+{
+	struct ape_dev *ape;
+	printk(KERN_DEBUG "remove(0x%p)\n", dev);
+	if ((dev == NULL) || (dev->dev.driver_data == NULL)) {
+		printk(KERN_DEBUG "remove(dev = 0x%p) dev->dev.driver_data = 0x%p\n", dev, dev->dev.driver_data);
+		return;
+	}
+	ape = (struct ape_dev *)dev->dev.driver_data;
+	printk(KERN_DEBUG "remove(dev = 0x%p) where dev->dev.driver_data = 0x%p\n", dev, ape);
+	if (ape->pci_dev != dev) {
+		printk(KERN_DEBUG "dev->dev.driver_data->pci_dev (0x%08lx) != dev (0x%08lx)\n",
+		(unsigned long)ape->pci_dev, (unsigned long)dev);
+	}
+	/* remove character device */
+#if ALTPCIECHDMA_CDEV
+	sg_exit(ape);
+#endif
+
+	if (ape->table_virt)
+		pci_free_consistent(dev, APE_CHDMA_TABLE_SIZE, ape->table_virt, ape->table_bus);
+
+	/* free IRQ
+	 * @see LDD3 page 279
+	 */
+	if (ape->irq_line >= 0) {
+		printk(KERN_DEBUG "Freeing IRQ #%d for dev_id 0x%08lx.\n",
+		ape->irq_line, (unsigned long)ape);
+		free_irq(ape->irq_line, (void *)ape);
+	}
+	/* MSI was enabled? */
+	if (ape->msi_enabled) {
+		/* Disable MSI @see Documentation/MSI-HOWTO.txt */
+		pci_disable_msi(dev);
+		ape->msi_enabled = 0;
+	}
+	/* unmap the BARs */
+	unmap_bars(ape, dev);
+	if (!ape->in_use)
+		pci_disable_device(dev);
+	if (ape->got_regions)
+		/* to be called after device disable */
+		pci_release_regions(dev);
+}
+
+#if ALTPCIECHDMA_CDEV
+
+/*
+ * Called when the device goes from unused to used.
+ */
+static int sg_open(struct inode *inode, struct file *file)
+{
+	struct ape_dev *ape;
+	printk(KERN_DEBUG DRV_NAME "_open()\n");
+	/* pointer to containing data structure of the character device inode */
+	ape = container_of(inode->i_cdev, struct ape_dev, cdev);
+	/* create a reference to our device state in the opened file */
+	file->private_data = ape;
+	/* create virtual memory mapper */
+	ape->sgm = sg_create_mapper(MAX_CHDMA_SIZE);
+	return 0;
+}
+
+/*
+ * Called when the device goes from used to unused.
+ */
+static int sg_close(struct inode *inode, struct file *file)
+{
+	/* fetch device specific data stored earlier during open */
+	struct ape_dev *ape = (struct ape_dev *)file->private_data;
+	printk(KERN_DEBUG DRV_NAME "_close()\n");
+	/* destroy virtual memory mapper */
+	sg_destroy_mapper(ape->sgm);
+	return 0;
+}
+
+static ssize_t sg_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
+{
+	/* fetch device specific data stored earlier during open */
+	struct ape_dev *ape = (struct ape_dev *)file->private_data;
+	(void)ape;
+	printk(KERN_DEBUG DRV_NAME "_read(buf=0x%p, count=%lld, pos=%llu)\n", buf, (s64)count, (u64)*pos);
+	return count;
+}
+
+/* sg_write() - Write to the device
+ *
+ * @buf userspace buffer
+ * @count number of bytes in the userspace buffer
+ *
+ * Iterate over the userspace buffer, taking at most 255 * PAGE_SIZE bytes for
+ * each DMA transfer.
+ *   For each transfer, get the user pages, build a sglist, map it, build a
+ *   descriptor table, submit the transfer, then wait for the interrupt
+ *   handler to wake us on completion.
+ */
+static ssize_t sg_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+	int hwnents, tents;
+	size_t transfer_len, remaining = count, done = 0;
+	u64 transfer_addr = (u64)buf;
+	/* fetch device specific data stored earlier during open */
+	struct ape_dev *ape = (struct ape_dev *)file->private_data;
+	printk(KERN_DEBUG DRV_NAME "_write(buf=0x%p, count=%lld, pos=%llu)\n",
+		buf, (s64)count, (u64)*pos);
+	/* TODO transfer boundaries at PAGE_SIZE granularity */
+	while (remaining > 0) {
+		/* limit DMA transfer size */
+		transfer_len = (remaining < APE_CHDMA_MAX_TRANSFER_LEN)? remaining:
+			APE_CHDMA_MAX_TRANSFER_LEN;
+		/* get all user space buffer pages and create a scattergather list */
+		sgm_map_user_pages(ape->sgm, transfer_addr, transfer_len, 0/*read from userspace*/);
+		printk(KERN_DEBUG DRV_NAME "mapped_pages=%d\n", ape->sgm->mapped_pages);
+		/* map all entries in the scattergather list */
+		hwnents = pci_map_sg(ape->pci_dev, ape->sgm->sgl, ape->sgm->mapped_pages, DMA_TO_DEVICE);
+		printk(KERN_DEBUG DRV_NAME "hwnents=%d\n", hwnents);
+		/* build device descriptor tables and submit them to the DMA engine */
+		tents = ape_sg_to_chdma_table(ape->sgm->sgl, hwnents, 0, &ape->table_virt->desc[0], 4096);
+		printk(KERN_DEBUG DRV_NAME "tents=%d\n", hwnents);
+#if 0
+		while (tables) {
+			/* TODO build table */
+			/* TODO submit table to the device */
+			/* if engine stopped and unfinished work then start engine */
+		}
+		put ourselves on wait queue
+#endif
+
+		pci_unmap_sg(ape->pci_dev, ape->sgm->sgl, ape->sgm->mapped_pages, DMA_TO_DEVICE);
+		/* dirty and free the pages */
+		sgm_unmap_user_pages(ape->sgm, 1/*dirtied*/);
+		/* book keeping */
+		transfer_addr += transfer_len;
+		remaining -= transfer_len;
+		done += transfer_len;
+	}
+	return done;
+}
+
+/*
+ * character device file operations
+ */
+static struct file_operations sg_fops = {
+	.owner = THIS_MODULE,
+	.open = sg_open,
+	.release = sg_close,
+	.read = sg_read,
+	.write = sg_write,
+};
+
+/* sg_init() - Initialize character device
+ *
+ * XXX Should ideally be tied to the device, on device probe, not module init.
+ */
+static int sg_init(struct ape_dev *ape)
+{
+	int rc;
+	printk(KERN_DEBUG DRV_NAME " sg_init()\n");
+	/* allocate a dynamically allocated character device node */
+	rc = alloc_chrdev_region(&ape->cdevno, 0/*requested minor*/, 1/*count*/, DRV_NAME);
+	/* allocation failed? */
+	if (rc < 0) {
+		printk("alloc_chrdev_region() = %d\n", rc);
+		goto fail_alloc;
+	}
+	/* couple the device file operations to the character device */
+	cdev_init(&ape->cdev, &sg_fops);
+	ape->cdev.owner = THIS_MODULE;
+	/* bring character device live */
+	rc = cdev_add(&ape->cdev, ape->cdevno, 1/*count*/);
+	if (rc < 0) {
+		printk("cdev_add() = %d\n", rc);
+		goto fail_add;
+	}
+	printk(KERN_DEBUG "altpciechdma = %d:%d\n", MAJOR(ape->cdevno), MINOR(ape->cdevno));
+	return 0;
+fail_add:
+	/* free the dynamically allocated character device node */
+	unregister_chrdev_region(ape->cdevno, 1/*count*/);
+fail_alloc:
+	return -1;
+}
+
+/* sg_exit() - Cleanup character device
+ *
+ * XXX Should ideally be tied to the device, on device remove, not module exit.
+ */
+
+static void sg_exit(struct ape_dev *ape)
+{
+	printk(KERN_DEBUG DRV_NAME " sg_exit()\n");
+	/* remove the character device */
+	cdev_del(&ape->cdev);
+	/* free the dynamically allocated character device node */
+	unregister_chrdev_region(ape->cdevno, 1/*count*/);
+}
+
+#endif /* ALTPCIECHDMA_CDEV */
+
+/* used to register the driver with the PCI kernel sub system
+ * @see LDD3 page 311
+ */
+static struct pci_driver pci_driver = {
+	.name = DRV_NAME,
+	.id_table = ids,
+	.probe = probe,
+	.remove = remove,
+	/* resume, suspend are optional */
+};
+
+/**
+ * alterapciechdma_init() - Module initialization, registers devices.
+ */
+static int __init alterapciechdma_init(void)
+{
+	int rc = 0;
+	printk(KERN_DEBUG DRV_NAME " init(), built at " __DATE__ " " __TIME__ "\n");
+	/* register this driver with the PCI bus driver */
+	rc = pci_register_driver(&pci_driver);
+	if (rc < 0)
+		return rc;
+	return 0;
+}
+
+/**
+ * alterapciechdma_exit() - Module cleanup, unregisters devices.
+ */
+static void __exit alterapciechdma_exit(void)
+{
+	printk(KERN_DEBUG DRV_NAME " exit(), built at " __DATE__ " " __TIME__ "\n");
+	/* unregister this driver from the PCI bus driver */
+	pci_unregister_driver(&pci_driver);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(alterapciechdma_init);
+module_exit(alterapciechdma_exit);
+

+ 86 - 0
drivers/staging/android/Kconfig

@@ -0,0 +1,86 @@
+menu "Android"
+
+config ANDROID
+	bool "Android Drivers"
+	default n
+	---help---
+	  Enable support for various drivers needed on the Android platform
+
+config ANDROID_BINDER_IPC
+	bool "Android Binder IPC Driver"
+	default n
+
+config ANDROID_LOGGER
+	tristate "Android log driver"
+	default n
+
+config ANDROID_RAM_CONSOLE
+	bool "Android RAM buffer console"
+	default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+	bool "Enable verbose console messages on Android RAM console"
+	default y
+	depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	bool "Android RAM Console Enable error correction"
+	default n
+	depends on ANDROID_RAM_CONSOLE
+	select REED_SOLOMON
+	select REED_SOLOMON_ENC8
+	select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+	int "Android RAM Console Data data size"
+	default 128
+	help
+	  Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+	int "Android RAM Console ECC size"
+	default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+	int "Android RAM Console Symbol size"
+	default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+	hex "Android RAM Console Polynomial"
+	default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+	default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+	default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+	default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+	default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+	bool "Start Android RAM console early"
+	default n
+	depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+	hex "Android RAM console virtual address"
+	default 0
+	depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+	hex "Android RAM console buffer size"
+	default 0
+	depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_GPIO
+	tristate "Android timed gpio driver"
+	depends on GENERIC_GPIO
+	default n
+
+config ANDROID_LOW_MEMORY_KILLER
+	bool "Android Low Memory Killer"
+	default n
+	---help---
+	  Register processes to be killed when memory is low
+
+endmenu

+ 5 - 0
drivers/staging/android/Makefile

@@ -0,0 +1,5 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE)	+= ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o

+ 10 - 0
drivers/staging/android/TODO

@@ -0,0 +1,10 @@
+TODO:
+	- checkpatch.pl cleanups
+	- sparse fixes
+	- rename files to be not so "generic"
+	- make sure things build as modules properly
+	- add proper arch dependencies as needed
+	- audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Brian Swetland <swetland@google.com>

+ 3503 - 0
drivers/staging/android/binder.c

@@ -0,0 +1,3503 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static HLIST_HEAD(binder_procs);
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct proc_dir_entry *binder_proc_dir_entry_root;
+static struct proc_dir_entry *binder_proc_dir_entry_proc;
+static struct hlist_head binder_dead_nodes;
+
+static int binder_read_proc_proc(
+	char *page, char **start, off_t off, int count, int *eof, void *data);
+
+/* These are only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K                               0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M                               0x400000
+#endif
+
+#ifndef __i386__
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE | VM_EXEC)
+#else
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
+#endif
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+	BINDER_DEBUG_USER_ERROR             = 1U << 0,
+	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
+	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
+	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
+	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
+	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
+	BINDER_DEBUG_READ_WRITE             = 1U << 6,
+	BINDER_DEBUG_USER_REFS              = 1U << 7,
+	BINDER_DEBUG_THREADS                = 1U << 8,
+	BINDER_DEBUG_TRANSACTION            = 1U << 9,
+	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
+	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
+	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
+	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
+	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+static int binder_set_stop_on_user_error(
+	const char *val, struct kernel_param *kp)
+{
+	int ret;
+	ret = param_set_int(val, kp);
+	if (binder_stop_on_user_error < 2)
+		wake_up(&binder_user_error_wait);
+	return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_user_error(x...) \
+	do { \
+		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+			printk(KERN_INFO x); \
+		if (binder_stop_on_user_error) \
+			binder_stop_on_user_error = 2; \
+	} while (0)
+
+enum {
+	BINDER_STAT_PROC,
+	BINDER_STAT_THREAD,
+	BINDER_STAT_NODE,
+	BINDER_STAT_REF,
+	BINDER_STAT_DEATH,
+	BINDER_STAT_TRANSACTION,
+	BINDER_STAT_TRANSACTION_COMPLETE,
+	BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+	int obj_created[BINDER_STAT_COUNT];
+	int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+struct binder_transaction_log_entry {
+	int debug_id;
+	int call_type;
+	int from_proc;
+	int from_thread;
+	int target_handle;
+	int to_proc;
+	int to_thread;
+	int to_node;
+	int data_size;
+	int offsets_size;
+};
+struct binder_transaction_log {
+	int next;
+	int full;
+	struct binder_transaction_log_entry entry[32];
+};
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+	struct binder_transaction_log *log)
+{
+	struct binder_transaction_log_entry *e;
+	e = &log->entry[log->next];
+	memset(e, 0, sizeof(*e));
+	log->next++;
+	if (log->next == ARRAY_SIZE(log->entry)) {
+		log->next = 0;
+		log->full = 1;
+	}
+	return e;
+}
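+/* Note (illustrative): the log is a fixed 32-entry ring; once "full" is
+ * set, each new entry silently overwrites the oldest one. */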
+
+struct binder_work {
+	struct list_head entry;
+	enum {
+		BINDER_WORK_TRANSACTION = 1,
+		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_NODE,
+		BINDER_WORK_DEAD_BINDER,
+		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+	} type;
+};
+
+struct binder_node {
+	int debug_id;
+	struct binder_work work;
+	union {
+		struct rb_node rb_node;
+		struct hlist_node dead_node;
+	};
+	struct binder_proc *proc;
+	struct hlist_head refs;
+	int internal_strong_refs;
+	int local_weak_refs;
+	int local_strong_refs;
+	void __user *ptr;
+	void __user *cookie;
+	unsigned has_strong_ref : 1;
+	unsigned pending_strong_ref : 1;
+	unsigned has_weak_ref : 1;
+	unsigned pending_weak_ref : 1;
+	unsigned has_async_transaction : 1;
+	unsigned accept_fds : 1;
+	int min_priority : 8;
+	struct list_head async_todo;
+};
+
+struct binder_ref_death {
+	struct binder_work work;
+	void __user *cookie;
+};
+
+struct binder_ref {
+	/* Lookups needed: */
+	/*   node + proc => ref (transaction) */
+	/*   desc + proc => ref (transaction, inc/dec ref) */
+	/*   node => refs + procs (proc exit) */
+	int debug_id;
+	struct rb_node rb_node_desc;
+	struct rb_node rb_node_node;
+	struct hlist_node node_entry;
+	struct binder_proc *proc;
+	struct binder_node *node;
+	uint32_t desc;
+	int strong;
+	int weak;
+	struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free : 1;
+	unsigned allow_user_free : 1;
+	unsigned async_transaction : 1;
+	unsigned debug_id : 29;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	uint8_t data[0];
+};
+
+struct binder_proc {
+	struct hlist_node proc_node;
+	struct rb_root threads;
+	struct rb_root nodes;
+	struct rb_root refs_by_desc;
+	struct rb_root refs_by_node;
+	int pid;
+	struct vm_area_struct *vma;
+	struct task_struct *tsk;
+	void *buffer;
+	size_t user_buffer_offset;
+
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+
+	struct page **pages;
+	size_t buffer_size;
+	uint32_t buffer_free;
+	struct list_head todo;
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+	struct list_head delivered_death;
+	int max_threads;
+	int requested_threads;
+	int requested_threads_started;
+	int ready_threads;
+	long default_priority;
+};
+
+enum {
+	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
+	BINDER_LOOPER_STATE_ENTERED     = 0x02,
+	BINDER_LOOPER_STATE_EXITED      = 0x04,
+	BINDER_LOOPER_STATE_INVALID     = 0x08,
+	BINDER_LOOPER_STATE_WAITING     = 0x10,
+	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+	struct binder_proc *proc;
+	struct rb_node rb_node;
+	int pid;
+	int looper;
+	struct binder_transaction *transaction_stack;
+	struct list_head todo;
+	uint32_t return_error; /* Write failed, return error code in read buf */
+	uint32_t return_error2; /* Write failed, return error code in read */
+		/* buffer. Used when sending a reply to a dead process that */
+		/* we are also waiting on */
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+};
+
+struct binder_transaction {
+	int debug_id;
+	struct binder_work work;
+	struct binder_thread *from;
+	struct binder_transaction *from_parent;
+	struct binder_proc *to_proc;
+	struct binder_thread *to_thread;
+	struct binder_transaction *to_parent;
+	unsigned need_reply : 1;
+	/*unsigned is_dead : 1;*/ /* not used at the moment */
+
+	struct binder_buffer *buffer;
+	unsigned int	code;
+	unsigned int	flags;
+	long	priority;
+	long	saved_priority;
+	uid_t	sender_euid;
+};
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
+{
+	struct files_struct *files = get_files_struct(tsk);
+	int fd, error;
+	struct fdtable *fdt;
+	unsigned long rlim_cur;
+
+	if (files == NULL)
+		return -ESRCH;
+
+	error = -EMFILE;
+	spin_lock(&files->file_lock);
+
+repeat:
+	fdt = files_fdtable(files);
+	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+				files->next_fd);
+
+	/*
+	 * N.B. For clone tasks sharing a files structure, this test
+	 * will limit the total number of files that can be opened.
+	 */
+	rcu_read_lock();
+	if (tsk->signal)
+		rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+	else
+		rlim_cur = 0;
+	rcu_read_unlock();
+	if (fd >= rlim_cur)
+		goto out;
+
+	/* Do we need to expand the fd array or fd set?  */
+	error = expand_files(files, fd);
+	if (error < 0)
+		goto out;
+
+	if (error) {
+		/*
+		 * If we needed to expand the fs array we
+		 * might have blocked - try again.
+		 */
+		error = -EMFILE;
+		goto repeat;
+	}
+
+	FD_SET(fd, fdt->open_fds);
+	if (flags & O_CLOEXEC)
+		FD_SET(fd, fdt->close_on_exec);
+	else
+		FD_CLR(fd, fdt->close_on_exec);
+	files->next_fd = fd + 1;
+#if 1
+	/* Sanity check */
+	if (fdt->fd[fd] != NULL) {
+		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+		fdt->fd[fd] = NULL;
+	}
+#endif
+	error = fd;
+
+out:
+	spin_unlock(&files->file_lock);
+	put_files_struct(files);
+	return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+	struct task_struct *tsk, unsigned int fd, struct file *file)
+{
+	struct files_struct *files = get_files_struct(tsk);
+	struct fdtable *fdt;
+
+	if (files == NULL)
+		return;
+
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	BUG_ON(fdt->fd[fd] != NULL);
+	rcu_assign_pointer(fdt->fd[fd], file);
+	spin_unlock(&files->file_lock);
+	put_files_struct(files);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+	struct fdtable *fdt = files_fdtable(files);
+	__FD_CLR(fd, fdt->open_fds);
+	if (fd < files->next_fd)
+		files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct task_struct *tsk, unsigned int fd)
+{
+	struct file *filp;
+	struct files_struct *files = get_files_struct(tsk);
+	struct fdtable *fdt;
+	int retval;
+
+	if (files == NULL)
+		return -ESRCH;
+
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	if (fd >= fdt->max_fds)
+		goto out_unlock;
+	filp = fdt->fd[fd];
+	if (!filp)
+		goto out_unlock;
+	rcu_assign_pointer(fdt->fd[fd], NULL);
+	FD_CLR(fd, fdt->close_on_exec);
+	__put_unused_fd(files, fd);
+	spin_unlock(&files->file_lock);
+	retval = filp_close(filp, files);
+
+	/* can't restart close syscall because file table entry was cleared */
+	if (unlikely(retval == -ERESTARTSYS ||
+		     retval == -ERESTARTNOINTR ||
+		     retval == -ERESTARTNOHAND ||
+		     retval == -ERESTART_RESTARTBLOCK))
+		retval = -EINTR;
+
+	put_files_struct(files);
+	return retval;
+
+out_unlock:
+	spin_unlock(&files->file_lock);
+	put_files_struct(files);
+	return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+	long min_nice;
+	if (can_nice(current, nice)) {
+		set_user_nice(current, nice);
+		return;
+	}
+	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+	if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP)
+		printk(KERN_INFO "binder: %d: nice value %ld not allowed use "
+		       "%ld instead\n", current->pid, nice, min_nice);
+	set_user_nice(current, min_nice);
+	if (min_nice < 20)
+		return;
+	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
+static size_t binder_buffer_size(
+	struct binder_proc *proc, struct binder_buffer *buffer)
+{
+	if (list_is_last(&buffer->entry, &proc->buffers))
+		return proc->buffer + proc->buffer_size - (void *)buffer->data;
+	else
+		return (size_t)list_entry(buffer->entry.next,
+			struct binder_buffer, entry) - (size_t)buffer->data;
+}
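+/* Note (illustrative): a buffer's size is the gap from its data[] to the
+ * start of the next buffer in the address-ordered list; the last buffer
+ * extends to the end of the proc's mmap'ed buffer area. */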
+
+static void binder_insert_free_buffer(
+	struct binder_proc *proc, struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->free_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	size_t new_buffer_size;
+
+	BUG_ON(!new_buffer->free);
+
+	new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+		printk(KERN_INFO "binder: %d: add free buffer, size %zd, "
+		       "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (new_buffer_size < buffer_size)
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(
+	struct binder_proc *proc, struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->allocated_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+
+	BUG_ON(new_buffer->free);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (new_buffer < buffer)
+			p = &parent->rb_left;
+		else if (new_buffer > buffer)
+			p = &parent->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(
+	struct binder_proc *proc, void __user *user_ptr)
+{
+	struct rb_node *n = proc->allocated_buffers.rb_node;
+	struct binder_buffer *buffer;
+	struct binder_buffer *kern_ptr;
+
+	kern_ptr = user_ptr - proc->user_buffer_offset
+		- offsetof(struct binder_buffer, data);
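+	/* user_buffer_offset is the delta between the kernel mapping and the
+	 * task's mmap of the same pages, so subtracting it (plus the data[]
+	 * offset) recovers the kernel-side struct binder_buffer pointer. */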
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (kern_ptr < buffer)
+			n = n->rb_left;
+		else if (kern_ptr > buffer)
+			n = n->rb_right;
+		else
+			return buffer;
+	}
+	return NULL;
+}
+
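+/*
+ * Allocate (@allocate != 0) or free the pages backing [@start, @end)
+ * of the buffer space.  When @vma is NULL the proc's mm is pinned and
+ * locked first.  New pages are mapped twice: into the kernel via
+ * map_vm_area() and into the process via vm_insert_page(); on failure
+ * the partly built range is unwound through the fall-through labels.
+ */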
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+	void *start, void *end, struct vm_area_struct *vma)
+{
+	void *page_addr;
+	unsigned long user_page_addr;
+	struct vm_struct tmp_area;
+	struct page **page;
+	struct mm_struct *mm;
+
+	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+		printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
+		       proc->pid, allocate ? "allocate" : "free", start, end);
+
+	if (end <= start)
+		return 0;
+
+	if (vma)
+		mm = NULL;
+	else
+		mm = get_task_mm(proc->tsk);
+
+	if (mm) {
+		down_write(&mm->mmap_sem);
+		vma = proc->vma;
+	}
+
+	if (allocate == 0)
+		goto free_range;
+
+	if (vma == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+		       "map pages in userspace, no vma\n", proc->pid);
+		goto err_no_vma;
+	}
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		int ret;
+		struct page **page_array_ptr;
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+		BUG_ON(*page);
+		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (*page == NULL) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "for page at %p\n", proc->pid, page_addr);
+			goto err_alloc_page_failed;
+		}
+		tmp_area.addr = page_addr;
+		/* the extra PAGE_SIZE covers the guard page that
+		 * map_vm_area() subtracts from the area size */
+		tmp_area.size = PAGE_SIZE + PAGE_SIZE;
+		page_array_ptr = page;
+		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+		if (ret) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "to map page at %p in kernel\n",
+			       proc->pid, page_addr);
+			goto err_map_kernel_failed;
+		}
+		user_page_addr = (size_t)page_addr + proc->user_buffer_offset;
+		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		if (ret) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "to map page at %lx in userspace\n",
+			       proc->pid, user_page_addr);
+			goto err_vm_insert_page_failed;
+		}
+		/* vm_insert_page does not seem to increment the refcount */
+	}
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return 0;
+
+free_range:
+	for (page_addr = end - PAGE_SIZE; page_addr >= start;
+	     page_addr -= PAGE_SIZE) {
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		if (vma)
+			zap_page_range(vma, (size_t)page_addr +
+				proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+		__free_page(*page);
+		*page = NULL;
+err_alloc_page_failed:
+		;
+	}
+err_no_vma:
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return -ENOMEM;
+}
+
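+/*
+ * Carve out a buffer of @data_size + @offsets_size (each aligned to
+ * sizeof(void *)): best-fit search of the free rb-tree, back the
+ * result with pages, and return any unused tail as a new free buffer.
+ * Async allocations are additionally charged to free_async_space.
+ */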
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+	size_t data_size, size_t offsets_size, int is_async)
+{
+	struct rb_node *n = proc->free_buffers.rb_node;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	struct rb_node *best_fit = NULL;
+	void *has_page_addr;
+	void *end_page_addr;
+	size_t size;
+
+	if (proc->vma == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+		       proc->pid);
+		return NULL;
+	}
+
+	size = ALIGN(data_size, sizeof(void *)) +
+		ALIGN(offsets_size, sizeof(void *));
+
+	if (size < data_size || size < offsets_size) {
+		binder_user_error("binder: %d: got transaction with invalid "
+			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
+		return NULL;
+	}
+
+	if (is_async &&
+	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
+		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+			printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd f"
+			       "ailed, no async space left\n", proc->pid, size);
+		return NULL;
+	}
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (size < buffer_size) {
+			best_fit = n;
+			n = n->rb_left;
+		} else if (size > buffer_size)
+			n = n->rb_right;
+		else {
+			best_fit = n;
+			break;
+		}
+	}
+	if (best_fit == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+		       "no address space\n", proc->pid, size);
+		return NULL;
+	}
+	if (n == NULL) {
+		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+		buffer_size = binder_buffer_size(proc, buffer);
+	}
+	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+		printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got buff"
+		       "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+	has_page_addr =
+		(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK);
+	if (n == NULL) {
+		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+			buffer_size = size; /* no room for other buffers */
+		else
+			buffer_size = size + sizeof(struct binder_buffer);
+	}
+	end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size);
+	if (end_page_addr > has_page_addr)
+		end_page_addr = has_page_addr;
+	if (binder_update_page_range(proc, 1,
+	    (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL))
+		return NULL;
+
+	rb_erase(best_fit, &proc->free_buffers);
+	buffer->free = 0;
+	binder_insert_allocated_buffer(proc, buffer);
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(proc, new_buffer);
+	}
+	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+		printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got "
+		       "%p\n", proc->pid, size, buffer);
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->async_transaction = is_async;
+	if (is_async) {
+		proc->free_async_space -= size + sizeof(struct binder_buffer);
+		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
+			printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd "
+			       "async free %zd\n", proc->pid, size,
+			       proc->free_async_space);
+	}
+
+	return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+	return (void *)((size_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+	return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
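+/*
+ * Unlink a free buffer's header from @proc->buffers and release the
+ * pages it covers, except for pages still shared with the buffers on
+ * either side of it.
+ */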
+static void binder_delete_free_buffer(
+	struct binder_proc *proc, struct binder_buffer *buffer)
+{
+	struct binder_buffer *prev, *next = NULL;
+	int free_page_end = 1;
+	int free_page_start = 1;
+
+	BUG_ON(proc->buffers.next == &buffer->entry);
+	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	BUG_ON(!prev->free);
+	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+		free_page_start = 0;
+		if (buffer_end_page(prev) == buffer_end_page(buffer))
+			free_page_end = 0;
+		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+			printk(KERN_INFO "binder: %d: merge free, buffer %p "
+			       "share page with %p\n", proc->pid, buffer, prev);
+	}
+
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		next = list_entry(buffer->entry.next,
+				  struct binder_buffer, entry);
+		if (buffer_start_page(next) == buffer_end_page(buffer)) {
+			free_page_end = 0;
+			if (buffer_start_page(next) ==
+			    buffer_start_page(buffer))
+				free_page_start = 0;
+			if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+				printk(KERN_INFO "binder: %d: merge free, "
+				       "buffer %p share page with %p\n",
+				       proc->pid, buffer, next);
+		}
+	}
+	list_del(&buffer->entry);
+	if (free_page_start || free_page_end) {
+		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+			printk(KERN_INFO "binder: %d: merge free, buffer %p do "
+			       "not share page%s%s with with %p or %p\n",
+			       proc->pid, buffer, free_page_start ? "" : " end",
+			       free_page_end ? "" : " start", prev, next);
+		binder_update_page_range(proc, 0, free_page_start ?
+			buffer_start_page(buffer) : buffer_end_page(buffer),
+			(free_page_end ? buffer_end_page(buffer) :
+			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+	}
+}
+
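+/*
+ * Return @buffer to the free rb-tree, releasing its whole pages and
+ * merging it with a free neighbour on either side to keep free space
+ * coalesced.
+ */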
+static void binder_free_buf(
+	struct binder_proc *proc, struct binder_buffer *buffer)
+{
+	size_t size, buffer_size;
+
+	buffer_size = binder_buffer_size(proc, buffer);
+
+	size = ALIGN(buffer->data_size, sizeof(void *)) +
+		ALIGN(buffer->offsets_size, sizeof(void *));
+	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+		printk(KERN_INFO "binder: %d: binder_free_buf %p size %zd buffer"
+		       "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+	BUG_ON(buffer->free);
+	BUG_ON(size > buffer_size);
+	BUG_ON(buffer->transaction != NULL);
+	BUG_ON((void *)buffer < proc->buffer);
+	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+	if (buffer->async_transaction) {
+		proc->free_async_space += size + sizeof(struct binder_buffer);
+		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
+			printk(KERN_INFO "binder: %d: binder_free_buf size %zd "
+			       "async free %zd\n", proc->pid, size,
+			       proc->free_async_space);
+	}
+
+	binder_update_page_range(proc, 0,
+		(void *)PAGE_ALIGN((size_t)buffer->data),
+		(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK),
+		NULL);
+	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+	buffer->free = 1;
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		struct binder_buffer *next = list_entry(buffer->entry.next,
+						struct binder_buffer, entry);
+		if (next->free) {
+			rb_erase(&next->rb_node, &proc->free_buffers);
+			binder_delete_free_buffer(proc, next);
+		}
+	}
+	if (proc->buffers.next != &buffer->entry) {
+		struct binder_buffer *prev = list_entry(buffer->entry.prev,
+						struct binder_buffer, entry);
+		if (prev->free) {
+			binder_delete_free_buffer(proc, buffer);
+			rb_erase(&prev->rb_node, &proc->free_buffers);
+			buffer = prev;
+		}
+	}
+	binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *
+binder_get_node(struct binder_proc *proc, void __user *ptr)
+{
+	struct rb_node *n = proc->nodes.rb_node;
+	struct binder_node *node;
+
+	while (n) {
+		node = rb_entry(n, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			n = n->rb_left;
+		else if (ptr > node->ptr)
+			n = n->rb_right;
+		else
+			return node;
+	}
+	return NULL;
+}
+
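+/*
+ * Create a node for the user-space object at @ptr and insert it into
+ * @proc->nodes (keyed by the user-space pointer).  Returns NULL if a
+ * node for @ptr already exists or allocation fails.
+ */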
+static struct binder_node *
+binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
+{
+	struct rb_node **p = &proc->nodes.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_node *node;
+
+	while (*p) {
+		parent = *p;
+		node = rb_entry(parent, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			p = &(*p)->rb_left;
+		else if (ptr > node->ptr)
+			p = &(*p)->rb_right;
+		else
+			return NULL;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL)
+		return NULL;
+	binder_stats.obj_created[BINDER_STAT_NODE]++;
+	rb_link_node(&node->rb_node, parent, p);
+	rb_insert_color(&node->rb_node, &proc->nodes);
+	node->debug_id = ++binder_last_id;
+	node->proc = proc;
+	node->ptr = ptr;
+	node->cookie = cookie;
+	node->work.type = BINDER_WORK_NODE;
+	INIT_LIST_HEAD(&node->work.entry);
+	INIT_LIST_HEAD(&node->async_todo);
+	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+		printk(KERN_INFO "binder: %d:%d node %d u%p c%p created\n",
+		       proc->pid, current->pid, node->debug_id,
+		       node->ptr, node->cookie);
+	return node;
+}
+
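+/*
+ * Take a strong or weak reference on @node.  "internal" refs are held
+ * on behalf of remote binder_refs, "local" refs on behalf of this
+ * process.  If user space has not acknowledged the reference yet, the
+ * node's work entry is queued on @target_list.
+ */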
+static int
+binder_inc_node(struct binder_node *node, int strong, int internal,
+		struct list_head *target_list)
+{
+	if (strong) {
+		if (internal) {
+			if (target_list == NULL &&
+			    node->internal_strong_refs == 0 &&
+			    !(node == binder_context_mgr_node &&
+			    node->has_strong_ref)) {
+				printk(KERN_ERR "binder: invalid inc strong "
+					"node for %d\n", node->debug_id);
+				return -EINVAL;
+			}
+			node->internal_strong_refs++;
+		} else
+			node->local_strong_refs++;
+		if (!node->has_strong_ref && target_list) {
+			list_del_init(&node->work.entry);
+			list_add_tail(&node->work.entry, target_list);
+		}
+	} else {
+		if (!internal)
+			node->local_weak_refs++;
+		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+			if (target_list == NULL) {
+				printk(KERN_ERR "binder: invalid inc weak node "
+					"for %d\n", node->debug_id);
+				return -EINVAL;
+			}
+			list_add_tail(&node->work.entry, target_list);
+		}
+	}
+	return 0;
+}
+
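+/*
+ * Drop a strong or weak reference on @node.  When the last one goes
+ * away, either queue node work so user space can release its side, or
+ * unlink and free the node if nothing references it any more.
+ */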
+static int
+binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	if (strong) {
+		if (internal)
+			node->internal_strong_refs--;
+		else
+			node->local_strong_refs--;
+		if (node->local_strong_refs || node->internal_strong_refs)
+			return 0;
+	} else {
+		if (!internal)
+			node->local_weak_refs--;
+		if (node->local_weak_refs || !hlist_empty(&node->refs))
+			return 0;
+	}
+	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+		if (list_empty(&node->work.entry)) {
+			list_add_tail(&node->work.entry, &node->proc->todo);
+			wake_up_interruptible(&node->proc->wait);
+		}
+	} else {
+		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+		    !node->local_weak_refs) {
+			list_del_init(&node->work.entry);
+			if (node->proc) {
+				rb_erase(&node->rb_node, &node->proc->nodes);
+				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+					printk(KERN_INFO "binder: refless node %d deleted\n", node->debug_id);
+			} else {
+				hlist_del(&node->dead_node);
+				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+					printk(KERN_INFO "binder: dead node %d deleted\n", node->debug_id);
+			}
+			kfree(node);
+			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+		}
+	}
+
+	return 0;
+}
+
+
+static struct binder_ref *
+binder_get_ref(struct binder_proc *proc, uint32_t desc)
+{
+	struct rb_node *n = proc->refs_by_desc.rb_node;
+	struct binder_ref *ref;
+
+	while (n) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+		if (desc < ref->desc)
+			n = n->rb_left;
+		else if (desc > ref->desc)
+			n = n->rb_right;
+		else
+			return ref;
+	}
+	return NULL;
+}
+
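+/*
+ * Find the ref that @proc holds on @node, creating one if needed.  A
+ * new ref gets the lowest unused descriptor (0 is reserved for the
+ * context manager) and is indexed in both refs_by_node and
+ * refs_by_desc.
+ */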
+static struct binder_ref *
+binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
+{
+	struct rb_node *n;
+	struct rb_node **p = &proc->refs_by_node.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_ref *ref, *new_ref;
+
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+		if (node < ref->node)
+			p = &(*p)->rb_left;
+		else if (node > ref->node)
+			p = &(*p)->rb_right;
+		else
+			return ref;
+	}
+	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (new_ref == NULL)
+		return NULL;
+	binder_stats.obj_created[BINDER_STAT_REF]++;
+	new_ref->debug_id = ++binder_last_id;
+	new_ref->proc = proc;
+	new_ref->node = node;
+	rb_link_node(&new_ref->rb_node_node, parent, p);
+	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->desc > new_ref->desc)
+			break;
+		new_ref->desc = ref->desc + 1;
+	}
+
+	p = &proc->refs_by_desc.rb_node;
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+		if (new_ref->desc < ref->desc)
+			p = &(*p)->rb_left;
+		else if (new_ref->desc > ref->desc)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_ref->rb_node_desc, parent, p);
+	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+	if (node) {
+		hlist_add_head(&new_ref->node_entry, &node->refs);
+		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+			printk(KERN_INFO "binder: %d new ref %d desc %d for "
+				"node %d\n", proc->pid, new_ref->debug_id,
+				new_ref->desc, node->debug_id);
+	} else {
+		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+			printk(KERN_INFO "binder: %d new ref %d desc %d for "
+				"dead node\n", proc->pid, new_ref->debug_id,
+				new_ref->desc);
+	}
+	return new_ref;
+}
+
+static void
+binder_delete_ref(struct binder_ref *ref)
+{
+	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+		printk(KERN_INFO "binder: %d delete ref %d desc %d for "
+			"node %d\n", ref->proc->pid, ref->debug_id,
+			ref->desc, ref->node->debug_id);
+	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+	if (ref->strong)
+		binder_dec_node(ref->node, 1, 1);
+	hlist_del(&ref->node_entry);
+	binder_dec_node(ref->node, 0, 1);
+	if (ref->death) {
+		if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+			printk(KERN_INFO "binder: %d delete ref %d desc %d "
+				"has death notification\n", ref->proc->pid,
+				ref->debug_id, ref->desc);
+		list_del(&ref->death->work.entry);
+		kfree(ref->death);
+		binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
+	}
+	kfree(ref);
+	binder_stats.obj_deleted[BINDER_STAT_REF]++;
+}
+
+static int
+binder_inc_ref(
+	struct binder_ref *ref, int strong, struct list_head *target_list)
+{
+	int ret;
+	if (strong) {
+		if (ref->strong == 0) {
+			ret = binder_inc_node(ref->node, 1, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->strong++;
+	} else {
+		if (ref->weak == 0) {
+			ret = binder_inc_node(ref->node, 0, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->weak++;
+	}
+	return 0;
+}
+
+
+static int
+binder_dec_ref(struct binder_ref *ref, int strong)
+{
+	if (strong) {
+		if (ref->strong == 0) {
+			binder_user_error("binder: %d invalid dec strong, "
+					  "ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->strong--;
+		if (ref->strong == 0) {
+			int ret;
+			ret = binder_dec_node(ref->node, strong, 1);
+			if (ret)
+				return ret;
+		}
+	} else {
+		if (ref->weak == 0) {
+			binder_user_error("binder: %d invalid dec weak, "
+					  "ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->weak--;
+	}
+	if (ref->strong == 0 && ref->weak == 0)
+		binder_delete_ref(ref);
+	return 0;
+}
+
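+/*
+ * Unlink @t from @target_thread's transaction stack (if given) and
+ * free it, detaching it from its buffer first.
+ */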
+static void
+binder_pop_transaction(
+	struct binder_thread *target_thread, struct binder_transaction *t)
+{
+	if (target_thread) {
+		BUG_ON(target_thread->transaction_stack != t);
+		BUG_ON(target_thread->transaction_stack->from != target_thread);
+		target_thread->transaction_stack =
+			target_thread->transaction_stack->from_parent;
+		t->from = NULL;
+	}
+	t->need_reply = 0;
+	if (t->buffer)
+		t->buffer->transaction = NULL;
+	kfree(t);
+	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+}
+
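+/*
+ * Report @error_code to the sender of failed transaction @t, walking
+ * up the from_parent chain until a live sender thread is found or the
+ * stack is exhausted.
+ */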
+static void
+binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
+{
+	struct binder_thread *target_thread;
+	BUG_ON(t->flags & TF_ONE_WAY);
+	while (1) {
+		target_thread = t->from;
+		if (target_thread) {
+			if (target_thread->return_error != BR_OK &&
+			    target_thread->return_error2 == BR_OK) {
+				target_thread->return_error2 =
+					target_thread->return_error;
+				target_thread->return_error = BR_OK;
+			}
+			if (target_thread->return_error == BR_OK) {
+				if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+					printk(KERN_INFO "binder: send failed reply for transaction %d to %d:%d\n",
+					       t->debug_id, target_thread->proc->pid, target_thread->pid);
+
+				binder_pop_transaction(target_thread, t);
+				target_thread->return_error = error_code;
+				wake_up_interruptible(&target_thread->wait);
+			} else {
+				printk(KERN_ERR "binder: reply failed, target "
+					"thread, %d:%d, has error code %d "
+					"already\n", target_thread->proc->pid,
+					target_thread->pid,
+					target_thread->return_error);
+			}
+			return;
+		} else {
+			struct binder_transaction *next = t->from_parent;
+
+			if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+				printk(KERN_INFO "binder: send failed reply "
+					"for transaction %d, target dead\n",
+					t->debug_id);
+
+			binder_pop_transaction(target_thread, t);
+			if (next == NULL) {
+				if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+					printk(KERN_INFO "binder: reply failed,"
+						" no target thread at root\n");
+				return;
+			}
+			t = next;
+			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+				printk(KERN_INFO "binder: reply failed, no targ"
+					"et thread -- retry %d\n", t->debug_id);
+		}
+	}
+}
+
+static void
+binder_transaction_buffer_release(struct binder_proc *proc,
+			struct binder_buffer *buffer, size_t *failed_at);
+
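+/*
+ * Core of BC_TRANSACTION/BC_REPLY: resolve the target, copy the data
+ * and offsets into a buffer allocated from the target process, and
+ * translate every flat_binder_object (binders, handles, fds) into the
+ * target's namespace before queueing the work and waking the target.
+ * Errors unwind through the err_* labels and are reported through
+ * thread->return_error.
+ */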
+static void
+binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
+	struct binder_transaction_data *tr, int reply)
+{
+	struct binder_transaction *t;
+	struct binder_work *tcomplete;
+	size_t *offp, *off_end;
+	struct binder_proc *target_proc;
+	struct binder_thread *target_thread = NULL;
+	struct binder_node *target_node = NULL;
+	struct list_head *target_list;
+	wait_queue_head_t *target_wait;
+	struct binder_transaction *in_reply_to = NULL;
+	struct binder_transaction_log_entry *e;
+	uint32_t return_error;
+
+	e = binder_transaction_log_add(&binder_transaction_log);
+	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+	e->from_proc = proc->pid;
+	e->from_thread = thread->pid;
+	e->target_handle = tr->target.handle;
+	e->data_size = tr->data_size;
+	e->offsets_size = tr->offsets_size;
+
+	if (reply) {
+		in_reply_to = thread->transaction_stack;
+		if (in_reply_to == NULL) {
+			binder_user_error("binder: %d:%d got reply transaction "
+					  "with no transaction stack\n",
+					  proc->pid, thread->pid);
+			return_error = BR_FAILED_REPLY;
+			goto err_empty_call_stack;
+		}
+		binder_set_nice(in_reply_to->saved_priority);
+		if (in_reply_to->to_thread != thread) {
+			binder_user_error("binder: %d:%d got reply transaction "
+				"with bad transaction stack,"
+				" transaction %d has target %d:%d\n",
+				proc->pid, thread->pid, in_reply_to->debug_id,
+				in_reply_to->to_proc ?
+				in_reply_to->to_proc->pid : 0,
+				in_reply_to->to_thread ?
+				in_reply_to->to_thread->pid : 0);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			goto err_bad_call_stack;
+		}
+		thread->transaction_stack = in_reply_to->to_parent;
+		target_thread = in_reply_to->from;
+		if (target_thread == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (target_thread->transaction_stack != in_reply_to) {
+			binder_user_error("binder: %d:%d got reply transaction "
+				"with bad target transaction stack %d, "
+				"expected %d\n",
+				proc->pid, thread->pid,
+				target_thread->transaction_stack ?
+				target_thread->transaction_stack->debug_id : 0,
+				in_reply_to->debug_id);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			target_thread = NULL;
+			goto err_dead_binder;
+		}
+		target_proc = target_thread->proc;
+	} else {
+		if (tr->target.handle) {
+			struct binder_ref *ref;
+			ref = binder_get_ref(proc, tr->target.handle);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d got "
+					"transaction to invalid handle\n",
+					proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
+				goto err_invalid_target_handle;
+			}
+			target_node = ref->node;
+		} else {
+			target_node = binder_context_mgr_node;
+			if (target_node == NULL) {
+				return_error = BR_DEAD_REPLY;
+				goto err_no_context_mgr_node;
+			}
+		}
+		e->to_node = target_node->debug_id;
+		target_proc = target_node->proc;
+		if (target_proc == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+			struct binder_transaction *tmp;
+			tmp = thread->transaction_stack;
+			while (tmp) {
+				if (tmp->from && tmp->from->proc == target_proc)
+					target_thread = tmp->from;
+				tmp = tmp->from_parent;
+			}
+		}
+	}
+	if (target_thread) {
+		e->to_thread = target_thread->pid;
+		target_list = &target_thread->todo;
+		target_wait = &target_thread->wait;
+	} else {
+		target_list = &target_proc->todo;
+		target_wait = &target_proc->wait;
+	}
+	e->to_proc = target_proc->pid;
+
+	/* TODO: reuse incoming transaction for reply */
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (t == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_t_failed;
+	}
+	binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;
+
+	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	if (tcomplete == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_tcomplete_failed;
+	}
+	binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;
+
+	t->debug_id = ++binder_last_id;
+	e->debug_id = t->debug_id;
+
+	if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) {
+		if (reply)
+			printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, "
+			       "data %p-%p size %zd-%zd\n",
+			       proc->pid, thread->pid, t->debug_id,
+			       target_proc->pid, target_thread->pid,
+			       tr->data.ptr.buffer, tr->data.ptr.offsets,
+			       tr->data_size, tr->offsets_size);
+		else
+			printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> "
+			       "%d - node %d, data %p-%p size %zd-%zd\n",
+			       proc->pid, thread->pid, t->debug_id,
+			       target_proc->pid, target_node->debug_id,
+			       tr->data.ptr.buffer, tr->data.ptr.offsets,
+			       tr->data_size, tr->offsets_size);
+	}
+
+	if (!reply && !(tr->flags & TF_ONE_WAY))
+		t->from = thread;
+	else
+		t->from = NULL;
+	t->sender_euid = proc->tsk->cred->euid;
+	t->to_proc = target_proc;
+	t->to_thread = target_thread;
+	t->code = tr->code;
+	t->flags = tr->flags;
+	t->priority = task_nice(current);
+	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+	if (t->buffer == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_binder_alloc_buf_failed;
+	}
+	t->buffer->allow_user_free = 0;
+	t->buffer->debug_id = t->debug_id;
+	t->buffer->transaction = t;
+	t->buffer->target_node = target_node;
+	if (target_node)
+		binder_inc_node(target_node, 1, 0, NULL);
+
+	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+		binder_user_error("binder: %d:%d got transaction with invalid "
+			"data ptr\n", proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+		binder_user_error("binder: %d:%d got transaction with invalid "
+			"offsets ptr\n", proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	off_end = (void *)offp + tr->offsets_size;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+		if (*offp > t->buffer->data_size - sizeof(*fp)) {
+			binder_user_error("binder: %d:%d got transaction with "
+				"invalid offset, %zd\n",
+				proc->pid, thread->pid, *offp);
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_offset;
+		}
+		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_ref *ref;
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+			if (node == NULL) {
+				node = binder_new_node(proc, fp->binder, fp->cookie);
+				if (node == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_new_node_failed;
+				}
+				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+			}
+			if (fp->cookie != node->cookie) {
+				binder_user_error("binder: %d:%d sending u%p "
+					"node %d, cookie mismatch %p != %p\n",
+					proc->pid, thread->pid,
+					fp->binder, node->debug_id,
+					fp->cookie, node->cookie);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			ref = binder_get_ref_for_node(target_proc, node);
+			if (ref == NULL) {
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			if (fp->type == BINDER_TYPE_BINDER)
+				fp->type = BINDER_TYPE_HANDLE;
+			else
+				fp->type = BINDER_TYPE_WEAK_HANDLE;
+			fp->handle = ref->desc;
+			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+				printk(KERN_INFO "        node %d u%p -> ref %d desc %d\n",
+				       node->debug_id, node->ptr, ref->debug_id, ref->desc);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d got "
+					"transaction with invalid "
+					"handle, %ld\n", proc->pid,
+					thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_failed;
+			}
+			if (ref->node->proc == target_proc) {
+				if (fp->type == BINDER_TYPE_HANDLE)
+					fp->type = BINDER_TYPE_BINDER;
+				else
+					fp->type = BINDER_TYPE_WEAK_BINDER;
+				fp->binder = ref->node->ptr;
+				fp->cookie = ref->node->cookie;
+				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+				if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+					printk(KERN_INFO "        ref %d desc %d -> node %d u%p\n",
+					       ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr);
+			} else {
+				struct binder_ref *new_ref;
+				new_ref = binder_get_ref_for_node(target_proc, ref->node);
+				if (new_ref == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_get_ref_for_node_failed;
+				}
+				fp->handle = new_ref->desc;
+				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+				if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+					printk(KERN_INFO "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+					       ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id);
+			}
+		} break;
+
+		case BINDER_TYPE_FD: {
+			int target_fd;
+			struct file *file;
+
+			if (reply) {
+				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+						proc->pid, thread->pid, fp->handle);
+					return_error = BR_FAILED_REPLY;
+					goto err_fd_not_allowed;
+				}
+			} else if (!target_node->accept_fds) {
+				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fd_not_allowed;
+			}
+
+			file = fget(fp->handle);
+			if (file == NULL) {
+				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fget_failed;
+			}
+			target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC);
+			if (target_fd < 0) {
+				fput(file);
+				return_error = BR_FAILED_REPLY;
+				goto err_get_unused_fd_failed;
+			}
+			task_fd_install(target_proc->tsk, target_fd, file);
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+				printk(KERN_INFO "        fd %ld -> %d\n", fp->handle, target_fd);
+			/* TODO: fput? */
+			fp->handle = target_fd;
+		} break;
+
+		default:
+			binder_user_error("binder: %d:%d got transactio"
+				"n with invalid object type, %lx\n",
+				proc->pid, thread->pid, fp->type);
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_object_type;
+		}
+	}
+	if (reply) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		binder_pop_transaction(target_thread, in_reply_to);
+	} else if (!(t->flags & TF_ONE_WAY)) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		t->need_reply = 1;
+		t->from_parent = thread->transaction_stack;
+		thread->transaction_stack = t;
+	} else {
+		BUG_ON(target_node == NULL);
+		BUG_ON(t->buffer->async_transaction != 1);
+		if (target_node->has_async_transaction) {
+			target_list = &target_node->async_todo;
+			target_wait = NULL;
+		} else
+			target_node->has_async_transaction = 1;
+	}
+	t->work.type = BINDER_WORK_TRANSACTION;
+	list_add_tail(&t->work.entry, target_list);
+	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	list_add_tail(&tcomplete->entry, &thread->todo);
+	if (target_wait)
+		wake_up_interruptible(target_wait);
+	return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	t->buffer->transaction = NULL;
+	binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+	kfree(tcomplete);
+	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+err_alloc_tcomplete_failed:
+	kfree(t);
+	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+	if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+		printk(KERN_INFO "binder: %d:%d transaction failed %d, size"
+				"%zd-%zd\n",
+			   proc->pid, thread->pid, return_error,
+			   tr->data_size, tr->offsets_size);
+
+	{
+		struct binder_transaction_log_entry *fe;
+		fe = binder_transaction_log_add(&binder_transaction_log_failed);
+		*fe = *e;
+	}
+
+	BUG_ON(thread->return_error != BR_OK);
+	if (in_reply_to) {
+		thread->return_error = BR_TRANSACTION_COMPLETE;
+		binder_send_failed_reply(in_reply_to, return_error);
+	} else
+		thread->return_error = return_error;
+}
+
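+/*
+ * Drop the references taken while @buffer was filled in by
+ * binder_transaction(), one per translated object.  @failed_at limits
+ * the walk when unwinding a partially translated buffer; fds are only
+ * closed in that unwind case, since otherwise they belong to the
+ * target.
+ */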
+static void
+binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, size_t *failed_at)
+{
+	size_t *offp, *off_end;
+	int debug_id = buffer->debug_id;
+
+	if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+		printk(KERN_INFO "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+			   proc->pid, buffer->debug_id,
+			   buffer->data_size, buffer->offsets_size, failed_at);
+
+	if (buffer->target_node)
+		binder_dec_node(buffer->target_node, 1, 0);
+
+	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+	if (failed_at)
+		off_end = failed_at;
+	else
+		off_end = (void *)offp + buffer->offsets_size;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+		if (*offp > buffer->data_size - sizeof(*fp)) {
+			printk(KERN_ERR "binder: transaction release %d bad"
+					"offset %zd, size %zd\n", debug_id, *offp, buffer->data_size);
+			continue;
+		}
+		fp = (struct flat_binder_object *)(buffer->data + *offp);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+			if (node == NULL) {
+				printk(KERN_ERR "binder: transaction release %d bad node %p\n", debug_id, fp->binder);
+				break;
+			}
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+				printk(KERN_INFO "        node %d u%p\n",
+				       node->debug_id, node->ptr);
+			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+			if (ref == NULL) {
+				printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle);
+				break;
+			}
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+				printk(KERN_INFO "        ref %d desc %d (node %d)\n",
+				       ref->debug_id, ref->desc, ref->node->debug_id);
+			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+		} break;
+
+		case BINDER_TYPE_FD:
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+				printk(KERN_INFO "        fd %ld\n", fp->handle);
+			if (failed_at)
+				task_close_fd(proc->tsk, fp->handle);
+			break;
+
+		default:
+			printk(KERN_ERR "binder: transaction release %d bad object type %lx\n", debug_id, fp->type);
+			break;
+		}
+	}
+}
+
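+/*
+ * Consume BC_* commands from the user-supplied write buffer, advancing
+ * *consumed past each command handled.  Processing stops early once a
+ * command leaves an error in thread->return_error.
+ */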
+int
+binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+		    void __user *buffer, int size, signed long *consumed)
+{
+	uint32_t cmd;
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	while (ptr < end && thread->return_error == BR_OK) {
+		if (get_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+			binder_stats.bc[_IOC_NR(cmd)]++;
+			proc->stats.bc[_IOC_NR(cmd)]++;
+			thread->stats.bc[_IOC_NR(cmd)]++;
+		}
+		switch (cmd) {
+		case BC_INCREFS:
+		case BC_ACQUIRE:
+		case BC_RELEASE:
+		case BC_DECREFS: {
+			uint32_t target;
+			struct binder_ref *ref;
+			const char *debug_string;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (target == 0 && binder_context_mgr_node &&
+			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+				ref = binder_get_ref_for_node(proc,
+					       binder_context_mgr_node);
+				/* NULL (alloc failure) is caught below */
+				if (ref && ref->desc != target) {
+					binder_user_error("binder: %d:"
+						"%d tried to acquire "
+						"reference to desc 0, "
+						"got %d instead\n",
+						proc->pid, thread->pid,
+						ref->desc);
+				}
+			} else
+				ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d refcou"
+					"nt change on invalid ref %d\n",
+					proc->pid, thread->pid, target);
+				break;
+			}
+			switch (cmd) {
+			case BC_INCREFS:
+				debug_string = "IncRefs";
+				binder_inc_ref(ref, 0, NULL);
+				break;
+			case BC_ACQUIRE:
+				debug_string = "Acquire";
+				binder_inc_ref(ref, 1, NULL);
+				break;
+			case BC_RELEASE:
+				debug_string = "Release";
+				binder_dec_ref(ref, 1);
+				break;
+			case BC_DECREFS:
+			default:
+				debug_string = "DecRefs";
+				binder_dec_ref(ref, 0);
+				break;
+			}
+			if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+				printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+				       proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+			break;
+		}
+		case BC_INCREFS_DONE:
+		case BC_ACQUIRE_DONE: {
+			void __user *node_ptr;
+			void *cookie;
+			struct binder_node *node;
+
+			if (get_user(node_ptr, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			if (get_user(cookie, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			node = binder_get_node(proc, node_ptr);
+			if (node == NULL) {
+				binder_user_error("binder: %d:%d "
+					"%s u%p no match\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" :
+					"BC_ACQUIRE_DONE",
+					node_ptr);
+				break;
+			}
+			if (cookie != node->cookie) {
+				binder_user_error("binder: %d:%d %s u%p node %d"
+					" cookie mismatch %p != %p\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+					node_ptr, node->debug_id,
+					cookie, node->cookie);
+				break;
+			}
+			if (cmd == BC_ACQUIRE_DONE) {
+				if (node->pending_strong_ref == 0) {
+					binder_user_error("binder: %d:%d "
+						"BC_ACQUIRE_DONE node %d has "
+						"no pending acquire request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_strong_ref = 0;
+			} else {
+				if (node->pending_weak_ref == 0) {
+					binder_user_error("binder: %d:%d "
+						"BC_INCREFS_DONE node %d has "
+						"no pending increfs request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_weak_ref = 0;
+			}
+			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+			if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+				printk(KERN_INFO "binder: %d:%d %s node %d ls %d lw %d\n",
+				       proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs);
+			break;
+		}
+		case BC_ATTEMPT_ACQUIRE:
+			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+			return -EINVAL;
+		case BC_ACQUIRE_RESULT:
+			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+			return -EINVAL;
+
+		case BC_FREE_BUFFER: {
+			void __user *data_ptr;
+			struct binder_buffer *buffer;
+
+			if (get_user(data_ptr, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+
+			buffer = binder_buffer_lookup(proc, data_ptr);
+			if (buffer == NULL) {
+				binder_user_error("binder: %d:%d "
+					"BC_FREE_BUFFER u%p no match\n",
+					proc->pid, thread->pid, data_ptr);
+				break;
+			}
+			if (!buffer->allow_user_free) {
+				binder_user_error("binder: %d:%d "
+					"BC_FREE_BUFFER u%p matched "
+					"unreturned buffer\n",
+					proc->pid, thread->pid, data_ptr);
+				break;
+			}
+			if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
+				printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+				       proc->pid, thread->pid, data_ptr, buffer->debug_id,
+				       buffer->transaction ? "active" : "finished");
+
+			if (buffer->transaction) {
+				buffer->transaction->buffer = NULL;
+				buffer->transaction = NULL;
+			}
+			if (buffer->async_transaction && buffer->target_node) {
+				BUG_ON(!buffer->target_node->has_async_transaction);
+				if (list_empty(&buffer->target_node->async_todo))
+					buffer->target_node->has_async_transaction = 0;
+				else
+					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+			}
+			binder_transaction_buffer_release(proc, buffer, NULL);
+			binder_free_buf(proc, buffer);
+			break;
+		}
+
+		case BC_TRANSACTION:
+		case BC_REPLY: {
+			struct binder_transaction_data tr;
+
+			if (copy_from_user(&tr, ptr, sizeof(tr)))
+				return -EFAULT;
+			ptr += sizeof(tr);
+			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+			break;
+		}
+
+		case BC_REGISTER_LOOPER:
+			if (binder_debug_mask & BINDER_DEBUG_THREADS)
+				printk(KERN_INFO "binder: %d:%d BC_REGISTER_LOOPER\n",
+				       proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_REGISTER_LOOPER called "
+					"after BC_ENTER_LOOPER\n",
+					proc->pid, thread->pid);
+			} else if (proc->requested_threads == 0) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_REGISTER_LOOPER called "
+					"without request\n",
+					proc->pid, thread->pid);
+			} else {
+				proc->requested_threads--;
+				proc->requested_threads_started++;
+			}
+			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+			break;
+		case BC_ENTER_LOOPER:
+			if (binder_debug_mask & BINDER_DEBUG_THREADS)
+				printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n",
+				       proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_ENTER_LOOPER called after "
+					"BC_REGISTER_LOOPER\n",
+					proc->pid, thread->pid);
+			}
+			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+			break;
+		case BC_EXIT_LOOPER:
+			if (binder_debug_mask & BINDER_DEBUG_THREADS)
+				printk(KERN_INFO "binder: %d:%d BC_EXIT_LOOPER\n",
+				       proc->pid, thread->pid);
+			thread->looper |= BINDER_LOOPER_STATE_EXITED;
+			break;
+
+		case BC_REQUEST_DEATH_NOTIFICATION:
+		case BC_CLEAR_DEATH_NOTIFICATION: {
+			uint32_t target;
+			void __user *cookie;
+			struct binder_ref *ref;
+			struct binder_ref_death *death;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (get_user(cookie, (void __user * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d %s "
+					"invalid ref %d\n",
+					proc->pid, thread->pid,
+					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+					"BC_REQUEST_DEATH_NOTIFICATION" :
+					"BC_CLEAR_DEATH_NOTIFICATION",
+					target);
+				break;
+			}
+
+			if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
+				printk(KERN_INFO "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+				       proc->pid, thread->pid,
+				       cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+				       "BC_REQUEST_DEATH_NOTIFICATION" :
+				       "BC_CLEAR_DEATH_NOTIFICATION",
+				       cookie, ref->debug_id, ref->desc,
+				       ref->strong, ref->weak, ref->node->debug_id);
+
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				if (ref->death) {
+					binder_user_error("binder: %d:%"
+						"d BC_REQUEST_DEATH_NOTI"
+						"FICATION death notific"
+						"ation already set\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					thread->return_error = BR_ERROR;
+					if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
+						printk(KERN_INFO "binder: %d:%d "
+							"BC_REQUEST_DEATH_NOTIFICATION failed\n",
+							proc->pid, thread->pid);
+					break;
+				}
+				binder_stats.obj_created[BINDER_STAT_DEATH]++;
+				INIT_LIST_HEAD(&death->work.entry);
+				death->cookie = cookie;
+				ref->death = death;
+				if (ref->node->proc == NULL) {
+					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&ref->death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&ref->death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				}
+			} else {
+				if (ref->death == NULL) {
+					binder_user_error("binder: %d:%"
+						"d BC_CLEAR_DEATH_NOTIFI"
+						"CATION death notificat"
+						"ion not active\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = ref->death;
+				if (death->cookie != cookie) {
+					binder_user_error("binder: %d:%"
+						"d BC_CLEAR_DEATH_NOTIFI"
+						"CATION death notificat"
+						"ion cookie mismatch "
+						"%p != %p\n",
+						proc->pid, thread->pid,
+						death->cookie, cookie);
+					break;
+				}
+				ref->death = NULL;
+				if (list_empty(&death->work.entry)) {
+					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				} else {
+					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+				}
+			}
+		} break;
+		case BC_DEAD_BINDER_DONE: {
+			struct binder_work *w;
+			void __user *cookie;
+			struct binder_ref_death *death = NULL;
+			if (get_user(cookie, (void __user * __user *)ptr))
+				return -EFAULT;
+
+			ptr += sizeof(void *);
+			list_for_each_entry(w, &proc->delivered_death, entry) {
+				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+				if (tmp_death->cookie == cookie) {
+					death = tmp_death;
+					break;
+				}
+			}
+			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+				printk(KERN_INFO "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+				       proc->pid, thread->pid, cookie, death);
+			if (death == NULL) {
+				binder_user_error("binder: %d:%d BC_DEAD"
+					"_BINDER_DONE %p not found\n",
+					proc->pid, thread->pid, cookie);
+				break;
+			}
+
+			list_del_init(&death->work.entry);
+			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+					list_add_tail(&death->work.entry, &thread->todo);
+				} else {
+					list_add_tail(&death->work.entry, &proc->todo);
+					wake_up_interruptible(&proc->wait);
+				}
+			}
+		} break;
+
+		default:
+			printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd);
+			return -EINVAL;
+		}
+		*consumed = ptr - buffer;
+	}
+	return 0;
+}
+
+void
+binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd)
+{
+	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+		binder_stats.br[_IOC_NR(cmd)]++;
+		proc->stats.br[_IOC_NR(cmd)]++;
+		thread->stats.br[_IOC_NR(cmd)]++;
+	}
+}
+
+static int
+binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
+{
+	return !list_empty(&proc->todo) || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int
+binder_has_thread_work(struct binder_thread *thread)
+{
+	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
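+/*
+ * Fill the user-supplied read buffer with BR_* work items.  A thread
+ * with an empty transaction stack and todo list sleeps on the process
+ * wait queue, otherwise on its own, then drains work items until the
+ * buffer is full.  May end by asking user space for another looper
+ * thread via BR_SPAWN_LOOPER.
+ */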
+static int
+binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
+	void  __user *buffer, int size, signed long *consumed, int non_block)
+{
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	int ret = 0;
+	int wait_for_proc_work;
+
+	if (*consumed == 0) {
+		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+	}
+
+retry:
+	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
+
+	if (thread->return_error != BR_OK && ptr < end) {
+		if (thread->return_error2 != BR_OK) {
+			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (ptr == end)
+				goto done;
+			thread->return_error2 = BR_OK;
+		}
+		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		thread->return_error = BR_OK;
+		goto done;
+	}
+
+
+	thread->looper |= BINDER_LOOPER_STATE_WAITING;
+	if (wait_for_proc_work)
+		proc->ready_threads++;
+	mutex_unlock(&binder_lock);
+	if (wait_for_proc_work) {
+		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+					BINDER_LOOPER_STATE_ENTERED))) {
+			binder_user_error("binder: %d:%d ERROR: Thread waiting "
+				"for process work before calling BC_REGISTER_"
+				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
+				proc->pid, thread->pid, thread->looper);
+			wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+		}
+		binder_set_nice(proc->default_priority);
+		if (non_block) {
+			if (!binder_has_proc_work(proc, thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+	} else {
+		if (non_block) {
+			if (!binder_has_thread_work(thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+	}
+	mutex_lock(&binder_lock);
+	if (wait_for_proc_work)
+		proc->ready_threads--;
+	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+	if (ret)
+		return ret;
+
+	while (1) {
+		uint32_t cmd;
+		struct binder_transaction_data tr;
+		struct binder_work *w;
+		struct binder_transaction *t = NULL;
+
+		if (!list_empty(&thread->todo))
+			w = list_first_entry(&thread->todo, struct binder_work, entry);
+		else if (!list_empty(&proc->todo) && wait_for_proc_work)
+			w = list_first_entry(&proc->todo, struct binder_work, entry);
+		else {
+			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+				goto retry;
+			break;
+		}
+
+		if (end - ptr < sizeof(tr) + 4)
+			break;
+
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			t = container_of(w, struct binder_transaction, work);
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			cmd = BR_TRANSACTION_COMPLETE;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+
+			binder_stat_br(proc, thread, cmd);
+			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
+				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+				       proc->pid, thread->pid);
+
+			list_del(&w->entry);
+			kfree(w);
+			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+		} break;
+		case BINDER_WORK_NODE: {
+			struct binder_node *node = container_of(w, struct binder_node, work);
+			uint32_t cmd = BR_NOOP;
+			const char *cmd_name;
+			int strong = node->internal_strong_refs || node->local_strong_refs;
+			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+			if (weak && !node->has_weak_ref) {
+				cmd = BR_INCREFS;
+				cmd_name = "BR_INCREFS";
+				node->has_weak_ref = 1;
+				node->pending_weak_ref = 1;
+				node->local_weak_refs++;
+			} else if (strong && !node->has_strong_ref) {
+				cmd = BR_ACQUIRE;
+				cmd_name = "BR_ACQUIRE";
+				node->has_strong_ref = 1;
+				node->pending_strong_ref = 1;
+				node->local_strong_refs++;
+			} else if (!strong && node->has_strong_ref) {
+				cmd = BR_RELEASE;
+				cmd_name = "BR_RELEASE";
+				node->has_strong_ref = 0;
+			} else if (!weak && node->has_weak_ref) {
+				cmd = BR_DECREFS;
+				cmd_name = "BR_DECREFS";
+				node->has_weak_ref = 0;
+			}
+			if (cmd != BR_NOOP) {
+				if (put_user(cmd, (uint32_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(uint32_t);
+				if (put_user(node->ptr, (void * __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(void *);
+				if (put_user(node->cookie, (void * __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(void *);
+
+				binder_stat_br(proc, thread, cmd);
+				if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
+					printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n",
+					       proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+			} else {
+				list_del_init(&w->entry);
+				if (!weak && !strong) {
+					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+						printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n",
+						       proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
+					rb_erase(&node->rb_node, &proc->nodes);
+					kfree(node);
+					binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+				} else {
+					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
+						printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged\n",
+						       proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
+				}
+			}
+		} break;
+		case BINDER_WORK_DEAD_BINDER:
+		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+			struct binder_ref_death *death = container_of(w, struct binder_ref_death, work);
+			uint32_t cmd;
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+			else
+				cmd = BR_DEAD_BINDER;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(death->cookie, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
+				printk(KERN_INFO "binder: %d:%d %s %p\n",
+				       proc->pid, thread->pid,
+				       cmd == BR_DEAD_BINDER ?
+				       "BR_DEAD_BINDER" :
+				       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+				       death->cookie);
+
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+				list_del(&w->entry);
+				kfree(death);
+				binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
+			} else
+				list_move(&w->entry, &proc->delivered_death);
+			if (cmd == BR_DEAD_BINDER)
+				goto done; /* DEAD_BINDER notifications can cause transactions */
+		} break;
+		}
+
+		if (!t)
+			continue;
+
+		BUG_ON(t->buffer == NULL);
+		if (t->buffer->target_node) {
+			struct binder_node *target_node = t->buffer->target_node;
+			tr.target.ptr = target_node->ptr;
+			tr.cookie =  target_node->cookie;
+			t->saved_priority = task_nice(current);
+			if (t->priority < target_node->min_priority &&
+			    !(t->flags & TF_ONE_WAY))
+				binder_set_nice(t->priority);
+			else if (!(t->flags & TF_ONE_WAY) ||
+				 t->saved_priority > target_node->min_priority)
+				binder_set_nice(target_node->min_priority);
+			cmd = BR_TRANSACTION;
+		} else {
+			tr.target.ptr = NULL;
+			tr.cookie = NULL;
+			cmd = BR_REPLY;
+		}
+		tr.code = t->code;
+		tr.flags = t->flags;
+		tr.sender_euid = t->sender_euid;
+
+		if (t->from) {
+			struct task_struct *sender = t->from->proc->tsk;
+			tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
+		} else {
+			tr.sender_pid = 0;
+		}
+
+		tr.data_size = t->buffer->data_size;
+		tr.offsets_size = t->buffer->offsets_size;
+		tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
+		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
+
+		if (put_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		if (copy_to_user(ptr, &tr, sizeof(tr)))
+			return -EFAULT;
+		ptr += sizeof(tr);
+
+		binder_stat_br(proc, thread, cmd);
+		if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
+			printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d"
+				"size %zd-%zd ptr %p-%p\n",
+			       proc->pid, thread->pid,
+			       (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
+			       t->debug_id, t->from ? t->from->proc->pid : 0,
+			       t->from ? t->from->pid : 0, cmd,
+			       t->buffer->data_size, t->buffer->offsets_size,
+			       tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+		list_del(&t->work.entry);
+		t->buffer->allow_user_free = 1;
+		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+			t->to_parent = thread->transaction_stack;
+			t->to_thread = thread;
+			thread->transaction_stack = t;
+		} else {
+			t->buffer->transaction = NULL;
+			kfree(t);
+			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
+		}
+		break;
+	}
+
+done:
+
+	*consumed = ptr - buffer;
+	if (proc->requested_threads + proc->ready_threads == 0 &&
+	    proc->requested_threads_started < proc->max_threads &&
+	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+	     BINDER_LOOPER_STATE_ENTERED))
+	    /* the user-space code fails to spawn a new thread if we leave this out */) {
+		proc->requested_threads++;
+		if (binder_debug_mask & BINDER_DEBUG_THREADS)
+			printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
+			       proc->pid, thread->pid);
+		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+			return -EFAULT;
+	}
+	return 0;
+}
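
The BR_SPAWN_LOOPER request above only helps if user space reacts to it. A minimal user-space sketch of that reaction, against the ioctl interface from binder.h below; the helper and thread entry point are illustrative, not part of the driver:

#include <stdint.h>
#include <sys/ioctl.h>
#include "binder.h"

/* Issue a write-only BINDER_WRITE_READ (illustrative; errors ignored). */
static void binder_write(int fd, void *data, unsigned long len)
{
	struct binder_write_read bwr = {
		.write_size = len,
		.write_buffer = (unsigned long)data,
	};

	ioctl(fd, BINDER_WRITE_READ, &bwr);
}

/* Entry point of a thread spawned in response to BR_SPAWN_LOOPER: it must
 * announce itself with BC_REGISTER_LOOPER before reading commands, which
 * is what keeps requested_threads/requested_threads_started balanced. */
static void *spawned_looper(void *arg)
{
	int fd = (int)(long)arg;
	uint32_t cmd = BC_REGISTER_LOOPER;

	binder_write(fd, &cmd, sizeof(cmd));
	/* ... then block in BINDER_WRITE_READ with a read buffer ... */
	return NULL;
}
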
+
+static void binder_release_work(struct list_head *list)
+{
+	struct binder_work *w;
+	while (!list_empty(list)) {
+		w = list_first_entry(list, struct binder_work, entry);
+		list_del_init(&w->entry);
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			struct binder_transaction *t = container_of(w, struct binder_transaction, work);
+			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+				binder_send_failed_reply(t, BR_DEAD_REPLY);
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			kfree(w);
+			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
+		} break;
+		default:
+			break;
+		}
+	}
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+	struct binder_thread *thread = NULL;
+	struct rb_node *parent = NULL;
+	struct rb_node **p = &proc->threads.rb_node;
+
+	while (*p) {
+		parent = *p;
+		thread = rb_entry(parent, struct binder_thread, rb_node);
+
+		if (current->pid < thread->pid)
+			p = &(*p)->rb_left;
+		else if (current->pid > thread->pid)
+			p = &(*p)->rb_right;
+		else
+			break;
+	}
+	if (*p == NULL) {
+		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		if (thread == NULL)
+			return NULL;
+		binder_stats.obj_created[BINDER_STAT_THREAD]++;
+		thread->proc = proc;
+		thread->pid = current->pid;
+		init_waitqueue_head(&thread->wait);
+		INIT_LIST_HEAD(&thread->todo);
+		rb_link_node(&thread->rb_node, parent, p);
+		rb_insert_color(&thread->rb_node, &proc->threads);
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		thread->return_error = BR_OK;
+		thread->return_error2 = BR_OK;
+	}
+	return thread;
+}
+
+static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread)
+{
+	struct binder_transaction *t;
+	struct binder_transaction *send_reply = NULL;
+	int active_transactions = 0;
+
+	rb_erase(&thread->rb_node, &proc->threads);
+	t = thread->transaction_stack;
+	if (t && t->to_thread == thread)
+		send_reply = t;
+	while (t) {
+		active_transactions++;
+		if (binder_debug_mask & BINDER_DEBUG_DEAD_TRANSACTION)
+			printk(KERN_INFO "binder: release %d:%d transaction %d %s, still active\n",
+			       proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out");
+		if (t->to_thread == thread) {
+			t->to_proc = NULL;
+			t->to_thread = NULL;
+			if (t->buffer) {
+				t->buffer->transaction = NULL;
+				t->buffer = NULL;
+			}
+			t = t->to_parent;
+		} else if (t->from == thread) {
+			t->from = NULL;
+			t = t->from_parent;
+		} else
+			BUG();
+	}
+	if (send_reply)
+		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+	binder_release_work(&thread->todo);
+	kfree(thread);
+	binder_stats.obj_deleted[BINDER_STAT_THREAD]++;
+	return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread = NULL;
+	int wait_for_proc_work;
+
+	mutex_lock(&binder_lock);
+	thread = binder_get_thread(proc);
+	if (thread == NULL) {
+		mutex_unlock(&binder_lock);
+		return POLLERR;
+	}
+
+	wait_for_proc_work = thread->transaction_stack == NULL &&
+		list_empty(&thread->todo) && thread->return_error == BR_OK;
+	mutex_unlock(&binder_lock);
+
+	if (wait_for_proc_work) {
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+		poll_wait(filp, &proc->wait, wait);
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+	} else {
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+		poll_wait(filp, &thread->wait, wait);
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+	}
+	return 0;
+}
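
A client can also multiplex binder work with other descriptors through the poll hook above; a small sketch, assuming fd is an open /dev/binder descriptor:

#include <poll.h>

/* Returns >0 when BINDER_WRITE_READ would deliver commands immediately. */
static int binder_work_pending(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);
}
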
+
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread;
+	unsigned int size = _IOC_SIZE(cmd);
+	void __user *ubuf = (void __user *)arg;
+
+	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret)
+		return ret;
+
+	mutex_lock(&binder_lock);
+	thread = binder_get_thread(proc);
+	if (thread == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	switch (cmd) {
+	case BINDER_WRITE_READ: {
+		struct binder_write_read bwr;
+		if (size != sizeof(struct binder_write_read)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
+			printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+			       proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer);
+		if (bwr.write_size > 0) {
+			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+			if (ret < 0) {
+				bwr.read_consumed = 0;
+				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+					ret = -EFAULT;
+				goto err;
+			}
+		}
+		if (bwr.read_size > 0) {
+			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+			if (!list_empty(&proc->todo))
+				wake_up_interruptible(&proc->wait);
+			if (ret < 0) {
+				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+					ret = -EFAULT;
+				goto err;
+			}
+		}
+		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
+			printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+			       proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size);
+		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
+	case BINDER_SET_MAX_THREADS:
+		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	case BINDER_SET_CONTEXT_MGR:
+		if (binder_context_mgr_node != NULL) {
+			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+			ret = -EBUSY;
+			goto err;
+		}
+		if (binder_context_mgr_uid != -1) {
+			if (binder_context_mgr_uid != current->cred->euid) {
+				printk(KERN_ERR "binder: BINDER_SET_"
+				       "CONTEXT_MGR bad uid %d != %d\n",
+				       current->cred->euid,
+				       binder_context_mgr_uid);
+				ret = -EPERM;
+				goto err;
+			}
+		} else
+			binder_context_mgr_uid = current->cred->euid;
+		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+		if (binder_context_mgr_node == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		binder_context_mgr_node->local_weak_refs++;
+		binder_context_mgr_node->local_strong_refs++;
+		binder_context_mgr_node->has_strong_ref = 1;
+		binder_context_mgr_node->has_weak_ref = 1;
+		break;
+	case BINDER_THREAD_EXIT:
+		if (binder_debug_mask & BINDER_DEBUG_THREADS)
+			printk(KERN_INFO "binder: %d:%d exit\n",
+			       proc->pid, thread->pid);
+		binder_free_thread(proc, thread);
+		thread = NULL;
+		break;
+	case BINDER_VERSION:
+		if (size != sizeof(struct binder_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = 0;
+err:
+	if (thread)
+		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+	mutex_unlock(&binder_lock);
+	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret && ret != -ERESTARTSYS)
+		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+	return ret;
+}
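
Taken together, the ioctl cases above imply a simple client setup sequence: check the protocol version, then cap the looper pool. A sketch, where the thread count of 15 is an arbitrary example value:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "binder.h"

int main(void)
{
	struct binder_version vers;
	size_t max_threads = 15;		/* arbitrary example value */
	int fd = open("/dev/binder", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
		fprintf(stderr, "binder: protocol mismatch\n");
		return 1;
	}
	if (ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads) < 0)
		return 1;
	return 0;
}
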
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+	dump_stack();
+}
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+	proc->vma = NULL;
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+	.open = binder_vma_open,
+	.close = binder_vma_close,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+	struct vm_struct *area;
+	struct binder_proc *proc = filp->private_data;
+	const char *failure_string;
+	struct binder_buffer *buffer;
+
+	if ((vma->vm_end - vma->vm_start) > SZ_4M)
+		vma->vm_end = vma->vm_start + SZ_4M;
+
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+
+	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+		ret = -EPERM;
+		failure_string = "bad vm_flags";
+		goto err_bad_arg;
+	}
+	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	if (area == NULL) {
+		ret = -ENOMEM;
+		failure_string = "get_vm_area";
+		goto err_get_vm_area_failed;
+	}
+	proc->buffer = area->addr;
+	proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+	if (cache_is_vipt_aliasing()) {
+		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+			vma->vm_start += PAGE_SIZE;
+		}
+	}
+#endif
+	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+	if (proc->pages == NULL) {
+		ret = -ENOMEM;
+		failure_string = "alloc page array";
+		goto err_alloc_pages_failed;
+	}
+	proc->buffer_size = vma->vm_end - vma->vm_start;
+
+	vma->vm_ops = &binder_vm_ops;
+	vma->vm_private_data = proc;
+
+	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+		ret = -ENOMEM;
+		failure_string = "alloc small buf";
+		goto err_alloc_small_buf_failed;
+	}
+	buffer = proc->buffer;
+	INIT_LIST_HEAD(&proc->buffers);
+	list_add(&buffer->entry, &proc->buffers);
+	buffer->free = 1;
+	binder_insert_free_buffer(proc, buffer);
+	proc->free_async_space = proc->buffer_size / 2;
+	barrier();
+	proc->vma = vma;
+
+	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+	return 0;
+
+err_alloc_small_buf_failed:
+	kfree(proc->pages);
+err_alloc_pages_failed:
+	vfree(proc->buffer);
+err_get_vm_area_failed:
+err_bad_arg:
+	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+	return ret;
+}
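
The user-space counterpart is a single read-only mmap() right after open(): the driver truncates anything over SZ_4M, and FORBIDDEN_MMAP_FLAGS rejects writable mappings, since only the kernel writes into this buffer. A sketch, where the 1 MB size is an assumption rather than anything the driver mandates:

#include <sys/mman.h>

#define BINDER_MAP_SIZE (1024 * 1024)	/* any size up to 4 MB works */

/* Map the per-process transaction buffer; the kernel fills it, we only read. */
static void *binder_map(int fd)
{
	return mmap(NULL, BINDER_MAP_SIZE, PROT_READ,
		    MAP_PRIVATE | MAP_NORESERVE, fd, 0);
}
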
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+	struct binder_proc *proc;
+
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid);
+
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	if (proc == NULL)
+		return -ENOMEM;
+	get_task_struct(current);
+	proc->tsk = current;
+	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->wait);
+	proc->default_priority = task_nice(current);
+	mutex_lock(&binder_lock);
+	binder_stats.obj_created[BINDER_STAT_PROC]++;
+	hlist_add_head(&proc->proc_node, &binder_procs);
+	proc->pid = current->group_leader->pid;
+	INIT_LIST_HEAD(&proc->delivered_death);
+	filp->private_data = proc;
+	mutex_unlock(&binder_lock);
+
+	if (binder_proc_dir_entry_proc) {
+		char strbuf[11];
+		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+		create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
+	}
+
+	return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+	struct rb_node *n;
+	struct binder_proc *proc = filp->private_data;
+	int wake_count = 0;
+
+	mutex_lock(&binder_lock);
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+			wake_up_interruptible(&thread->wait);
+			wake_count++;
+		}
+	}
+	wake_up_interruptible_all(&proc->wait);
+	mutex_unlock(&binder_lock);
+
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
+
+	return 0;
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+	struct hlist_node *pos;
+	struct binder_transaction *t;
+	struct rb_node *n;
+	struct binder_proc *proc = filp->private_data;
+	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+	if (binder_proc_dir_entry_proc) {
+		char strbuf[11];
+		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
+	}
+	mutex_lock(&binder_lock);
+	hlist_del(&proc->proc_node);
+	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+		if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+			printk(KERN_INFO "binder_release: %d context_mgr_node gone\n", proc->pid);
+		binder_context_mgr_node = NULL;
+	}
+
+	threads = 0;
+	active_transactions = 0;
+	while ((n = rb_first(&proc->threads))) {
+		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+		threads++;
+		active_transactions += binder_free_thread(proc, thread);
+	}
+	nodes = 0;
+	incoming_refs = 0;
+	while ((n = rb_first(&proc->nodes))) {
+		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+		nodes++;
+		rb_erase(&node->rb_node, &proc->nodes);
+		list_del_init(&node->work.entry);
+		if (hlist_empty(&node->refs)) {
+			kfree(node);
+			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
+		} else {
+			struct binder_ref *ref;
+			int death = 0;
+
+			node->proc = NULL;
+			node->local_strong_refs = 0;
+			node->local_weak_refs = 0;
+			hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+				incoming_refs++;
+				if (ref->death) {
+					death++;
+					if (list_empty(&ref->death->work.entry)) {
+						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+						wake_up_interruptible(&ref->proc->wait);
+					} else
+						BUG();
+				}
+			}
+			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
+				printk(KERN_INFO "binder: node %d now dead, refs %d, death %d\n", node->debug_id, incoming_refs, death);
+		}
+	}
+	outgoing_refs = 0;
+	while ((n = rb_first(&proc->refs_by_desc))) {
+		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		outgoing_refs++;
+		binder_delete_ref(ref);
+	}
+	binder_release_work(&proc->todo);
+	buffers = 0;
+
+	while ((n = rb_first(&proc->allocated_buffers))) {
+		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node);
+		t = buffer->transaction;
+		if (t) {
+			t->buffer = NULL;
+			buffer->transaction = NULL;
+			printk(KERN_ERR "binder: release proc %d, transaction %d, not freed\n", proc->pid, t->debug_id);
+			/*BUG();*/
+		}
+		binder_free_buf(proc, buffer);
+		buffers++;
+	}
+
+	binder_stats.obj_deleted[BINDER_STAT_PROC]++;
+	mutex_unlock(&binder_lock);
+
+	page_count = 0;
+	if (proc->pages) {
+		int i;
+		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+			if (proc->pages[i]) {
+				if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
+					printk(KERN_INFO "binder_release: %d: page %d at %p not freed\n", proc->pid, i, proc->buffer + i * PAGE_SIZE);
+				__free_page(proc->pages[i]);
+				page_count++;
+			}
+		}
+		kfree(proc->pages);
+		vfree(proc->buffer);
+	}
+
+	put_task_struct(proc->tsk);
+
+	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
+		printk(KERN_INFO "binder_release: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+		       proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count);
+
+	kfree(proc);
+	return 0;
+}
+
+static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)
+{
+	buf += snprintf(buf, end - buf, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+			prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0,
+			t->from ? t->from->pid : 0,
+			t->to_proc ? t->to_proc->pid : 0,
+			t->to_thread ? t->to_thread->pid : 0,
+			t->code, t->flags, t->priority, t->need_reply);
+	if (buf >= end)
+		return buf;
+	if (t->buffer == NULL) {
+		buf += snprintf(buf, end - buf, " buffer free\n");
+		return buf;
+	}
+	if (t->buffer->target_node) {
+		buf += snprintf(buf, end - buf, " node %d",
+				t->buffer->target_node->debug_id);
+		if (buf >= end)
+			return buf;
+	}
+	buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n",
+			t->buffer->data_size, t->buffer->offsets_size,
+			t->buffer->data);
+	return buf;
+}
+
+static char *print_binder_buffer(char *buf, char *end, const char *prefix, struct binder_buffer *buffer)
+{
+	buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n",
+			prefix, buffer->debug_id, buffer->data,
+			buffer->data_size, buffer->offsets_size,
+			buffer->transaction ? "active" : "delivered");
+	return buf;
+}
+
+static char *print_binder_work(char *buf, char *end, const char *prefix,
+	const char *transaction_prefix, struct binder_work *w)
+{
+	struct binder_node *node;
+	struct binder_transaction *t;
+
+	switch (w->type) {
+	case BINDER_WORK_TRANSACTION:
+		t = container_of(w, struct binder_transaction, work);
+		buf = print_binder_transaction(buf, end, transaction_prefix, t);
+		break;
+	case BINDER_WORK_TRANSACTION_COMPLETE:
+		buf += snprintf(buf, end - buf,
+				"%stransaction complete\n", prefix);
+		break;
+	case BINDER_WORK_NODE:
+		node = container_of(w, struct binder_node, work);
+		buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
+				prefix, node->debug_id, node->ptr, node->cookie);
+		break;
+	case BINDER_WORK_DEAD_BINDER:
+		buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
+		break;
+	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		buf += snprintf(buf, end - buf,
+				"%shas cleared dead binder\n", prefix);
+		break;
+	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+		buf += snprintf(buf, end - buf,
+				"%shas cleared death notification\n", prefix);
+		break;
+	default:
+		buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
+				prefix, w->type);
+		break;
+	}
+	return buf;
+}
+
+static char *print_binder_thread(char *buf, char *end, struct binder_thread *thread, int print_always)
+{
+	struct binder_transaction *t;
+	struct binder_work *w;
+	char *start_buf = buf;
+	char *header_buf;
+
+	buf += snprintf(buf, end - buf, "  thread %d: l %02x\n", thread->pid, thread->looper);
+	header_buf = buf;
+	t = thread->transaction_stack;
+	while (t) {
+		if (buf >= end)
+			break;
+		if (t->from == thread) {
+			buf = print_binder_transaction(buf, end, "    outgoing transaction", t);
+			t = t->from_parent;
+		} else if (t->to_thread == thread) {
+			buf = print_binder_transaction(buf, end, "    incoming transaction", t);
+			t = t->to_parent;
+		} else {
+			buf = print_binder_transaction(buf, end, "    bad transaction", t);
+			t = NULL;
+		}
+	}
+	list_for_each_entry(w, &thread->todo, entry) {
+		if (buf >= end)
+			break;
+		buf = print_binder_work(buf, end, "    ",
+					"    pending transaction", w);
+	}
+	if (!print_always && buf == header_buf)
+		buf = start_buf;
+	return buf;
+}
+
+static char *print_binder_node(char *buf, char *end, struct binder_node *node)
+{
+	struct binder_ref *ref;
+	struct hlist_node *pos;
+	struct binder_work *w;
+	int count;
+	count = 0;
+	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+		count++;
+
+	buf += snprintf(buf, end - buf, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+			node->debug_id, node->ptr, node->cookie,
+			node->has_strong_ref, node->has_weak_ref,
+			node->local_strong_refs, node->local_weak_refs,
+			node->internal_strong_refs, count);
+	if (buf >= end)
+		return buf;
+	if (count) {
+		buf += snprintf(buf, end - buf, " proc");
+		if (buf >= end)
+			return buf;
+		hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+			buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
+			if (buf >= end)
+				return buf;
+		}
+	}
+	buf += snprintf(buf, end - buf, "\n");
+	list_for_each_entry(w, &node->async_todo, entry) {
+		if (buf >= end)
+			break;
+		buf = print_binder_work(buf, end, "    ",
+					"    pending async transaction", w);
+	}
+	return buf;
+}
+
+static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
+{
+	buf += snprintf(buf, end - buf, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
+			ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+			ref->node->debug_id, ref->strong, ref->weak, ref->death);
+	return buf;
+}
+
+static char *print_binder_proc(char *buf, char *end, struct binder_proc *proc, int print_all)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	char *start_buf = buf;
+	char *header_buf;
+
+	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
+	header_buf = buf;
+
+	for (n = rb_first(&proc->threads); n != NULL && buf < end; n = rb_next(n))
+		buf = print_binder_thread(buf, end, rb_entry(n, struct binder_thread, rb_node), print_all);
+	for (n = rb_first(&proc->nodes); n != NULL && buf < end; n = rb_next(n)) {
+		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+		if (print_all || node->has_async_transaction)
+			buf = print_binder_node(buf, end, node);
+	}
+	if (print_all) {
+		for (n = rb_first(&proc->refs_by_desc); n != NULL && buf < end; n = rb_next(n))
+			buf = print_binder_ref(buf, end, rb_entry(n, struct binder_ref, rb_node_desc));
+	}
+	for (n = rb_first(&proc->allocated_buffers); n != NULL && buf < end; n = rb_next(n))
+		buf = print_binder_buffer(buf, end, "  buffer", rb_entry(n, struct binder_buffer, rb_node));
+	list_for_each_entry(w, &proc->todo, entry) {
+		if (buf >= end)
+			break;
+		buf = print_binder_work(buf, end, "  ",
+					"  pending transaction", w);
+	}
+	list_for_each_entry(w, &proc->delivered_death, entry) {
+		if (buf >= end)
+			break;
+		buf += snprintf(buf, end - buf, "  has delivered dead binder\n");
+		break;
+	}
+	if (!print_all && buf == header_buf)
+		buf = start_buf;
+	return buf;
+}
+
+static const char *binder_return_strings[] = {
+	"BR_ERROR",
+	"BR_OK",
+	"BR_TRANSACTION",
+	"BR_REPLY",
+	"BR_ACQUIRE_RESULT",
+	"BR_DEAD_REPLY",
+	"BR_TRANSACTION_COMPLETE",
+	"BR_INCREFS",
+	"BR_ACQUIRE",
+	"BR_RELEASE",
+	"BR_DECREFS",
+	"BR_ATTEMPT_ACQUIRE",
+	"BR_NOOP",
+	"BR_SPAWN_LOOPER",
+	"BR_FINISHED",
+	"BR_DEAD_BINDER",
+	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
+	"BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+	"BC_TRANSACTION",
+	"BC_REPLY",
+	"BC_ACQUIRE_RESULT",
+	"BC_FREE_BUFFER",
+	"BC_INCREFS",
+	"BC_ACQUIRE",
+	"BC_RELEASE",
+	"BC_DECREFS",
+	"BC_INCREFS_DONE",
+	"BC_ACQUIRE_DONE",
+	"BC_ATTEMPT_ACQUIRE",
+	"BC_REGISTER_LOOPER",
+	"BC_ENTER_LOOPER",
+	"BC_EXIT_LOOPER",
+	"BC_REQUEST_DEATH_NOTIFICATION",
+	"BC_CLEAR_DEATH_NOTIFICATION",
+	"BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+	"proc",
+	"thread",
+	"node",
+	"ref",
+	"death",
+	"transaction",
+	"transaction_complete"
+};
+
+static char *print_binder_stats(char *buf, char *end, const char *prefix, struct binder_stats *stats)
+{
+	int i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+		if (stats->bc[i])
+			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
+					binder_command_strings[i], stats->bc[i]);
+		if (buf >= end)
+			return buf;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+		if (stats->br[i])
+			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
+					binder_return_strings[i], stats->br[i]);
+		if (buf >= end)
+			return buf;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings));
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted));
+	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+		if (stats->obj_created[i] || stats->obj_deleted[i])
+			buf += snprintf(buf, end - buf, "%s%s: active %d total %d\n", prefix,
+					binder_objstat_strings[i],
+					stats->obj_created[i] - stats->obj_deleted[i],
+					stats->obj_created[i]);
+		if (buf >= end)
+			return buf;
+	}
+	return buf;
+}
+
+static char *print_binder_proc_stats(char *buf, char *end, struct binder_proc *proc)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	int count, strong, weak;
+
+	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
+	if (buf >= end)
+		return buf;
+	count = 0;
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+		count++;
+	buf += snprintf(buf, end - buf, "  threads: %d\n", count);
+	if (buf >= end)
+		return buf;
+	buf += snprintf(buf, end - buf, "  requested threads: %d+%d/%d\n"
+			"  ready threads %d\n"
+			"  free async space %zd\n", proc->requested_threads,
+			proc->requested_threads_started, proc->max_threads,
+			proc->ready_threads, proc->free_async_space);
+	if (buf >= end)
+		return buf;
+	count = 0;
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+		count++;
+	buf += snprintf(buf, end - buf, "  nodes: %d\n", count);
+	if (buf >= end)
+		return buf;
+	count = 0;
+	strong = 0;
+	weak = 0;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		count++;
+		strong += ref->strong;
+		weak += ref->weak;
+	}
+	buf += snprintf(buf, end - buf, "  refs: %d s %d w %d\n", count, strong, weak);
+	if (buf >= end)
+		return buf;
+
+	count = 0;
+	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+		count++;
+	buf += snprintf(buf, end - buf, "  buffers: %d\n", count);
+	if (buf >= end)
+		return buf;
+
+	count = 0;
+	list_for_each_entry(w, &proc->todo, entry) {
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION:
+			count++;
+			break;
+		default:
+			break;
+		}
+	}
+	buf += snprintf(buf, end - buf, "  pending transactions: %d\n", count);
+	if (buf >= end)
+		return buf;
+
+	buf = print_binder_stats(buf, end, "  ", &proc->stats);
+
+	return buf;
+}
+
+
+static int binder_read_proc_state(
+	char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	struct binder_node *node;
+	int len = 0;
+	char *buf = page;
+	char *end = page + PAGE_SIZE;
+	int do_lock = !binder_debug_no_lock;
+
+	if (off)
+		return 0;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	buf += snprintf(buf, end - buf, "binder state:\n");
+
+	if (!hlist_empty(&binder_dead_nodes))
+		buf += snprintf(buf, end - buf, "dead nodes:\n");
+	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) {
+		if (buf >= end)
+			break;
+		buf = print_binder_node(buf, end, node);
+	}
+
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+		if (buf >= end)
+			break;
+		buf = print_binder_proc(buf, end, proc, 1);
+	}
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	if (buf > page + PAGE_SIZE)
+		buf = page + PAGE_SIZE;
+
+	*start = page + off;
+
+	len = buf - page;
+	if (len > off)
+		len -= off;
+	else
+		len = 0;
+
+	return len < count ? len : count;
+}
+
+static int binder_read_proc_stats(
+	char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	int len = 0;
+	char *p = page;
+	int do_lock = !binder_debug_no_lock;
+
+	if (off)
+		return 0;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	p += snprintf(p, PAGE_SIZE, "binder stats:\n");
+
+	p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats);
+
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+		if (p >= page + PAGE_SIZE)
+			break;
+		p = print_binder_proc_stats(p, page + PAGE_SIZE, proc);
+	}
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	if (p > page + PAGE_SIZE)
+		p = page + PAGE_SIZE;
+
+	*start = page + off;
+
+	len = p - page;
+	if (len > off)
+		len -= off;
+	else
+		len = 0;
+
+	return len < count ? len : count;
+}
+
+static int binder_read_proc_transactions(
+	char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	int len = 0;
+	char *buf = page;
+	char *end = page + PAGE_SIZE;
+	int do_lock = !binder_debug_no_lock;
+
+	if (off)
+		return 0;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	buf += snprintf(buf, end - buf, "binder transactions:\n");
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
+		if (buf >= end)
+			break;
+		buf = print_binder_proc(buf, end, proc, 0);
+	}
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	if (buf > page + PAGE_SIZE)
+		buf = page + PAGE_SIZE;
+
+	*start = page + off;
+
+	len = buf - page;
+	if (len > off)
+		len -= off;
+	else
+		len = 0;
+
+	return len < count ? len : count;
+}
+
+static int binder_read_proc_proc(
+	char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct binder_proc *proc = data;
+	int len = 0;
+	char *p = page;
+	int do_lock = !binder_debug_no_lock;
+
+	if (off)
+		return 0;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+	p += snprintf(p, PAGE_SIZE, "binder proc state:\n");
+	p = print_binder_proc(p, page + PAGE_SIZE, proc, 1);
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+
+	if (p > page + PAGE_SIZE)
+		p = page + PAGE_SIZE;
+	*start = page + off;
+
+	len = p - page;
+	if (len > off)
+		len -= off;
+	else
+		len = 0;
+
+	return len < count ? len : count;
+}
+
+static char *print_binder_transaction_log_entry(char *buf, char *end, struct binder_transaction_log_entry *e)
+{
+	buf += snprintf(buf, end - buf, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+			e->debug_id, (e->call_type == 2) ? "reply" :
+			((e->call_type == 1) ? "async" : "call "), e->from_proc,
+			e->from_thread, e->to_proc, e->to_thread, e->to_node,
+			e->target_handle, e->data_size, e->offsets_size);
+	return buf;
+}
+
+static int binder_read_proc_transaction_log(
+	char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct binder_transaction_log *log = data;
+	int len = 0;
+	int i;
+	char *buf = page;
+	char *end = page + PAGE_SIZE;
+
+	if (off)
+		return 0;
+
+	if (log->full) {
+		for (i = log->next; i < ARRAY_SIZE(log->entry); i++) {
+			if (buf >= end)
+				break;
+			buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]);
+		}
+	}
+	for (i = 0; i < log->next; i++) {
+		if (buf >= end)
+			break;
+		buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]);
+	}
+
+	*start = page + off;
+
+	len = buf - page;
+	if (len > off)
+		len -= off;
+	else
+		len = 0;
+
+	return len < count ? len : count;
+}
+
+static struct file_operations binder_fops = {
+	.owner = THIS_MODULE,
+	.poll = binder_poll,
+	.unlocked_ioctl = binder_ioctl,
+	.mmap = binder_mmap,
+	.open = binder_open,
+	.flush = binder_flush,
+	.release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "binder",
+	.fops = &binder_fops
+};
+
+static int __init binder_init(void)
+{
+	int ret;
+
+	binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
+	if (binder_proc_dir_entry_root)
+		binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root);
+	ret = misc_register(&binder_miscdev);
+	if (binder_proc_dir_entry_root) {
+		create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_state, NULL);
+		create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_stats, NULL);
+		create_proc_read_entry("transactions", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transactions, NULL);
+		create_proc_read_entry("transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log);
+		create_proc_read_entry("failed_transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log_failed);
+	}
+	return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
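
The procfs entries registered in binder_init() make the driver state easy to inspect from user space, for example by dumping the global stats file (a trivial sketch):

#include <stdio.h>

static void dump_binder_stats(void)
{
	char line[256];
	FILE *f = fopen("/proc/binder/stats", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
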

+ 330 - 0
drivers/staging/android/binder.h

@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+	/* 8 bytes for large_flat_header. */
+	unsigned long		type;
+	unsigned long		flags;
+
+	/* 8 bytes of data. */
+	union {
+		void		*binder;	/* local object */
+		signed long	handle;		/* remote object */
+	};
+
+	/* extra data associated with local object */
+	void			*cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver
+ * must translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+	signed long	write_size;	/* bytes to write */
+	signed long	write_consumed;	/* bytes consumed by driver */
+	unsigned long	write_buffer;
+	signed long	read_size;	/* bytes to read */
+	signed long	read_consumed;	/* bytes consumed by driver */
+	unsigned long	read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+	/* driver protocol version -- increment with incompatible change */
+	signed long	protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ   		_IOWR('b', 1, struct binder_write_read)
+#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, int64_t)
+#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t)
+#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, int)
+#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, int)
+#define	BINDER_THREAD_EXIT		_IOW('b', 8, int)
+#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * into the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
+	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
+	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
+	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
+	 * identifying the target and contents of the transaction.
+	 */
+	union {
+		size_t	handle;	/* target descriptor of command transaction */
+		void	*ptr;	/* target descriptor of return transaction */
+	} target;
+	void		*cookie;	/* target object cookie */
+	unsigned int	code;		/* transaction command */
+
+	/* General information about the transaction. */
+	unsigned int	flags;
+	pid_t		sender_pid;
+	uid_t		sender_euid;
+	size_t		data_size;	/* number of bytes of data */
+	size_t		offsets_size;	/* number of bytes of offsets */
+
+	/* If this transaction is inline, the data immediately
+	 * follows here; otherwise, it ends with a pointer to
+	 * the data buffer.
+	 */
+	union {
+		struct {
+			/* transaction data */
+			const void	*buffer;
+			/* offsets from buffer to flat_binder_object structs */
+			const void	*offsets;
+		} ptr;
+		uint8_t	buf[8];
+	} data;
+};
+
+struct binder_ptr_cookie {
+	void *ptr;
+	void *cookie;
+};
+
+struct binder_pri_desc {
+	int priority;
+	int desc;
+};
+
+struct binder_pri_ptr_cookie {
+	int priority;
+	void *ptr;
+	void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+	BR_ERROR = _IOR('r', 0, int),
+	/*
+	 * int: error code
+	 */
+
+	BR_OK = _IO('r', 1),
+	/* No parameters! */
+
+	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the received command.
+	 */
+
+	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+	/*
+	 * not currently supported
+	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+	 * Else the remote object has acquired a primary reference.
+	 */
+
+	BR_DEAD_REPLY = _IO('r', 5),
+	/*
+	 * The target of the last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+	 */
+
+	BR_TRANSACTION_COMPLETE = _IO('r', 6),
+	/*
+	 * No parameters... always refers to the last transaction requested
+	 * (including replies).  Note that this will be sent even for
+	 * asynchronous transactions.
+	 */
+
+	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+	/*
+	 * void *:	ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+	/*
+	 * not currently supported
+	 * int:	priority
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_NOOP = _IO('r', 12),
+	/*
+	 * No parameters.  Do nothing and examine the next command.  It exists
+	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+	 */
+
+	BR_SPAWN_LOOPER = _IO('r', 13),
+	/*
+	 * No parameters.  The driver has determined that a process has no
+	 * threads waiting to service incoming transactions.  When a process
+	 * receives this command, it must spawn a new service thread and
+	 * register it via bcENTER_LOOPER.
+	 */
+
+	BR_FINISHED = _IO('r', 14),
+	/*
+	 * not currently supported
+	 * stop threadpool thread
+	 */
+
+	BR_DEAD_BINDER = _IOR('r', 15, void *),
+	/*
+	 * void *: cookie
+	 */
+	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+	/*
+	 * void *: cookie
+	 */
+
+	BR_FAILED_REPLY = _IO('r', 17),
+	/*
+	 * The last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+	 */
+};
+
+enum BinderDriverCommandProtocol {
+	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the sent command.
+	 */
+
+	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+	/*
+	 * not currently supported
+	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+	 * Else you have acquired a primary reference on the object.
+	 */
+
+	BC_FREE_BUFFER = _IOW('c', 3, int),
+	/*
+	 * void *: ptr to transaction data received on a read
+	 */
+
+	BC_INCREFS = _IOW('c', 4, int),
+	BC_ACQUIRE = _IOW('c', 5, int),
+	BC_RELEASE = _IOW('c', 6, int),
+	BC_DECREFS = _IOW('c', 7, int),
+	/*
+	 * int:	descriptor
+	 */
+
+	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+	/*
+	 * not currently supported
+	 * int: priority
+	 * int: descriptor
+	 */
+
+	BC_REGISTER_LOOPER = _IO('c', 11),
+	/*
+	 * No parameters.
+	 * Register a spawned looper thread with the device.
+	 */
+
+	BC_ENTER_LOOPER = _IO('c', 12),
+	BC_EXIT_LOOPER = _IO('c', 13),
+	/*
+	 * No parameters.
+	 * These two commands are sent as an application-level thread
+	 * enters and exits the binder loop, respectively.  They are
+	 * used so the binder can have an accurate count of the number
+	 * of looping threads it has available.
+	 */
+
+	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie
+	 */
+
+	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie
+	 */
+
+	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+	/*
+	 * void *: cookie
+	 */
+};
+
+#endif /* _LINUX_BINDER_H */
+
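
Read together, the header implies a command loop shaped roughly like the sketch below: announce the thread with BC_ENTER_LOOPER, then block in BINDER_WRITE_READ, retrying on EINTR exactly as the NOTE above prescribes. Buffer size and error handling are illustrative:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "binder.h"

static void binder_loop(int fd)
{
	uint32_t enter = BC_ENTER_LOOPER;
	unsigned long readbuf[128];
	struct binder_write_read bwr = {
		.write_buffer = (unsigned long)&enter,
		.write_size = sizeof(enter),
	};

	ioctl(fd, BINDER_WRITE_READ, &bwr);	/* announce the main looper */

	for (;;) {
		bwr.write_size = 0;
		bwr.read_buffer = (unsigned long)readbuf;
		bwr.read_size = sizeof(readbuf);
		bwr.read_consumed = 0;
		if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0) {
			if (errno == EINTR)
				continue;	/* retry, per the NOTE above */
			break;			/* e.g. ECONNREFUSED: bail out */
		}
		/* parse the BR_* stream in readbuf[0..bwr.read_consumed) */
	}
}
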

+ 607 - 0
drivers/staging/android/logger.c

@@ -0,0 +1,607 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+	unsigned char *		buffer;	/* the ring buffer itself */
+	struct miscdevice	misc;	/* misc device representing the log */
+	wait_queue_head_t	wq;	/* wait queue for readers */
+	struct list_head	readers; /* this log's readers */
+	struct mutex		mutex;	/* mutex protecting buffer */
+	size_t			w_off;	/* current write head offset */
+	size_t			head;	/* new readers start here */
+	size_t			size;	/* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+	struct logger_log *	log;	/* associated log */
+	struct list_head	list;	/* entry in logger_log's list */
+	size_t			r_off;	/* current read head offset */
+};
+
+/*
+ * logger_offset - returns index 'n' into the log via (optimized) modulus;
+ * the log size is a power of two, so '(n) & (size - 1)' equals 'n % size'.
+ */
+#define logger_offset(n)	((n) & (log->size - 1))
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 	1) Need to quickly obtain the associated log during an I/O operation
+ * 	2) Readers need to maintain state (logger_reader)
+ * 	3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		return reader->log;
+	} else
+		return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry starting
+ * from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+	__u16 val;
+
+	switch (log->size - off) {
+	case 1:
+		memcpy(&val, log->buffer + off, 1);
+		memcpy(((char *) &val) + 1, log->buffer, 1);
+		break;
+	default:
+		memcpy(&val, log->buffer + off, 2);
+	}
+
+	return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+				   struct logger_reader *reader,
+				   char __user *buf,
+				   size_t count)
+{
+	size_t len;
+
+	/*
+	 * We read from the log in two disjoint operations. First, we read from
+	 * the current read head offset up to 'count' bytes or to the end of
+	 * the log, whichever comes first.
+	 */
+	len = min(count, log->size - reader->r_off);
+	if (copy_to_user(buf, log->buffer + reader->r_off, len))
+		return -EFAULT;
+
+	/*
+	 * Second, we read any remaining bytes, starting back at the head of
+	 * the log.
+	 */
+	if (count != len)
+		if (copy_to_user(buf + len, log->buffer, count - len))
+			return -EFAULT;
+
+	reader->r_off = logger_offset(reader->r_off + count);
+
+	return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * 	- O_NONBLOCK works
+ * 	- If there are no log entries to read, blocks until log is written to
+ * 	- Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	struct logger_reader *reader = file->private_data;
+	struct logger_log *log = reader->log;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+start:
+	while (1) {
+		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+		mutex_lock(&log->mutex);
+		ret = (log->w_off == reader->r_off);
+		mutex_unlock(&log->mutex);
+		if (!ret)
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		schedule();
+	}
+
+	finish_wait(&log->wq, &wait);
+	if (ret)
+		return ret;
+
+	mutex_lock(&log->mutex);
+
+	/* is there still something to read or did we race? */
+	if (unlikely(log->w_off == reader->r_off)) {
+		mutex_unlock(&log->mutex);
+		goto start;
+	}
+
+	/* get the size of the next entry */
+	ret = get_entry_len(log, reader->r_off);
+	if (count < ret) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* get exactly one entry from the log */
+	ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+	size_t count = 0;
+
+	do {
+		size_t nr = get_entry_len(log, off);
+		off = logger_offset(off + nr);
+		count += nr;
+	} while (count < len);
+
+	return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c? For example, in an 8-byte buffer the span a=6, b=2
+ * wraps past the end, so c=7 and c=1 are both crossed while c=4 is not.
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+	if (b < a) {
+		if (a < c || b >= c)
+			return 1;
+	} else {
+		if (a < c && b >= c)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+	size_t old = log->w_off;
+	size_t new = logger_offset(old + len);
+	struct logger_reader *reader;
+
+	if (clock_interval(old, new, log->head))
+		log->head = get_next_entry(log, log->head, len);
+
+	list_for_each_entry(reader, &log->readers, list)
+		if (clock_interval(old, new, reader->r_off))
+			reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	memcpy(log->buffer + log->w_off, buf, len);
+
+	if (count != len)
+		memcpy(log->buffer, buf + len, count - len);
+
+	log->w_off = logger_offset(log->w_off + count);
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+				      const void __user *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+		return -EFAULT;
+
+	if (count != len)
+		if (copy_from_user(log->buffer, buf + len, count - len))
+			return -EFAULT;
+
+	log->w_off = logger_offset(log->w_off + count);
+
+	return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t ppos)
+{
+	struct logger_log *log = file_get_log(iocb->ki_filp);
+	size_t orig = log->w_off;
+	struct logger_entry header;
+	struct timespec now;
+	ssize_t ret = 0;
+
+	now = current_kernel_time();
+
+	header.pid = current->tgid;
+	header.tid = current->pid;
+	header.sec = now.tv_sec;
+	header.nsec = now.tv_nsec;
+	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+	/* null writes succeed, return zero */
+	if (unlikely(!header.len))
+		return 0;
+
+	mutex_lock(&log->mutex);
+
+	/*
+	 * Fix up any readers, pulling them forward to the first readable
+	 * entry after (what will be) the new write offset. We do this now
+	 * because if we partially fail, we can end up with clobbered log
+	 * entries that encroach on readable buffer.
+	 */
+	fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+	do_write_log(log, &header, sizeof(struct logger_entry));
+
+	while (nr_segs-- > 0) {
+		size_t len;
+		ssize_t nr;
+
+		/* figure out how much of this vector we can keep */
+		len = min_t(size_t, iov->iov_len, header.len - ret);
+
+		/* write out this segment's payload */
+		nr = do_write_log_from_user(log, iov->iov_base, len);
+		if (unlikely(nr < 0)) {
+			log->w_off = orig;
+			mutex_unlock(&log->mutex);
+			return nr;
+		}
+
+		iov++;
+		ret += nr;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	/* wake up any blocked readers */
+	wake_up_interruptible(&log->wq);
+
+	return ret;
+}
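
On the writing side, one writev() per message maps to exactly one log entry, because the header and all payload segments are copied under a single hold of log->mutex. A user-space sketch follows; the priority-byte/tag/message layout is a user-space convention rather than something this driver enforces, and the device node name depends on how the misc device is exposed:

#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

static int log_write(const char *tag, const char *msg)
{
	unsigned char prio = 4;			/* "info" by convention */
	struct iovec vec[3] = {
		{ &prio,       1 },
		{ (void *)tag, strlen(tag) + 1 },
		{ (void *)msg, strlen(msg) + 1 },
	};
	int ret, fd = open("/dev/log_main", O_WRONLY);

	if (fd < 0)
		return -1;
	ret = writev(fd, vec, 3);		/* one call == one entry */
	close(fd);
	return ret;
}
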
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+	struct logger_log *log;
+	int ret;
+
+	ret = nonseekable_open(inode, file);
+	if (ret)
+		return ret;
+
+	log = get_log_from_minor(MINOR(inode->i_rdev));
+	if (!log)
+		return -ENODEV;
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader;
+
+		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+		if (!reader)
+			return -ENOMEM;
+
+		reader->log = log;
+		INIT_LIST_HEAD(&reader->list);
+
+		mutex_lock(&log->mutex);
+		reader->r_off = log->head;
+		list_add_tail(&reader->list, &log->readers);
+		mutex_unlock(&log->mutex);
+
+		file->private_data = reader;
+	} else
+		file->private_data = log;
+
+	return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		list_del(&reader->list);
+		kfree(reader);
+	}
+
+	return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+	struct logger_reader *reader;
+	struct logger_log *log;
+	unsigned int ret = POLLOUT | POLLWRNORM;
+
+	if (!(file->f_mode & FMODE_READ))
+		return ret;
+
+	reader = file->private_data;
+	log = reader->log;
+
+	poll_wait(file, &log->wq, wait);
+
+	mutex_lock(&log->mutex);
+	if (log->w_off != reader->r_off)
+		ret |= POLLIN | POLLRDNORM;
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct logger_log *log = file_get_log(file);
+	struct logger_reader *reader;
+	long ret = -ENOTTY;
+
+	mutex_lock(&log->mutex);
+
+	switch (cmd) {
+	case LOGGER_GET_LOG_BUF_SIZE:
+		ret = log->size;
+		break;
+	case LOGGER_GET_LOG_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		if (log->w_off >= reader->r_off)
+			ret = log->w_off - reader->r_off;
+		else
+			ret = (log->size - reader->r_off) + log->w_off;
+		break;
+	case LOGGER_GET_NEXT_ENTRY_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		if (log->w_off != reader->r_off)
+			ret = get_entry_len(log, reader->r_off);
+		else
+			ret = 0;
+		break;
+	case LOGGER_FLUSH_LOG:
+		if (!(file->f_mode & FMODE_WRITE)) {
+			ret = -EBADF;
+			break;
+		}
+		list_for_each_entry(reader, &log->readers, list)
+			reader->r_off = log->w_off;
+		log->head = log->w_off;
+		ret = 0;
+		break;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static struct file_operations logger_fops = {
+	.owner = THIS_MODULE,
+	.read = logger_read,
+	.aio_write = logger_aio_write,
+	.poll = logger_poll,
+	.unlocked_ioctl = logger_ioctl,
+	.compat_ioctl = logger_ioctl,
+	.open = logger_open,
+	.release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+	.buffer = _buf_ ## VAR, \
+	.misc = { \
+		.minor = MISC_DYNAMIC_MINOR, \
+		.name = NAME, \
+		.fops = &logger_fops, \
+		.parent = NULL, \
+	}, \
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+	.readers = LIST_HEAD_INIT(VAR .readers), \
+	.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+	.w_off = 0, \
+	.head = 0, \
+	.size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+	if (log_main.misc.minor == minor)
+		return &log_main;
+	if (log_events.misc.minor == minor)
+		return &log_events;
+	if (log_radio.misc.minor == minor)
+		return &log_radio;
+	return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+	int ret;
+
+	ret = misc_register(&log->misc);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "logger: failed to register misc "
+		       "device for log '%s'!\n", log->misc.name);
+		return ret;
+	}
+
+	printk(KERN_INFO "logger: created %luK log '%s'\n",
+	       (unsigned long) log->size >> 10, log->misc.name);
+
+	return 0;
+}
+
+static int __init logger_init(void)
+{
+	int ret;
+
+	ret = init_log(&log_main);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log(&log_events);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log(&log_radio);
+	if (unlikely(ret))
+		goto out;
+
+out:
+	return ret;
+}
+device_initcall(logger_init);

+ 48 - 0
drivers/staging/android/logger.h

@@ -0,0 +1,48 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+	__u16		len;	/* length of the payload */
+	__u16		__pad;	/* no matter what, we get 2 bytes of padding */
+	__s32		pid;	/* generating process's pid */
+	__s32		tid;	/* generating process's tid */
+	__s32		sec;	/* seconds since Epoch */
+	__s32		nsec;	/* nanoseconds */
+	char		msg[0];	/* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
+#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
+#define LOGGER_LOG_MAIN		"log_main"	/* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN		(4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD	\
+	(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO	0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
+
+#endif /* _LINUX_LOGGER_H */
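
   The header above is the complete userspace ABI for these log devices. A
   minimal reader sketch, assuming a /dev/log_main node for the dynamically
   assigned misc minor (the node name is up to userspace) and that each
   read() returns one complete entry, as the struct logger_entry framing
   suggests:

       #include <stdio.h>
       #include <fcntl.h>
       #include <unistd.h>
       #include <sys/ioctl.h>
       #include "logger.h"

       int main(void)
       {
               /* "/dev/log_main" is a hypothetical node name; it depends on
                * how userspace names the dynamic misc minor. */
               unsigned char buf[LOGGER_ENTRY_MAX_LEN] __attribute__((aligned(4)));
               int fd = open("/dev/log_main", O_RDONLY);
               ssize_t n;

               if (fd < 0)
                       return 1;
               printf("log buffer size: %d\n",
                      ioctl(fd, LOGGER_GET_LOG_BUF_SIZE));
               /* Assumes one complete entry (header + payload) per read(). */
               while ((n = read(fd, buf, sizeof(buf))) > 0) {
                       struct logger_entry *e = (struct logger_entry *)buf;

                       printf("pid %d tid %d: %.*s\n", e->pid, e->tid,
                              (int)e->len, e->msg);
               }
               close(fd);
               return 0;
       }

   A poll()-based reader should keep the logger_poll caveat above in mind:
   POLLIN does not guarantee that read() will not block, since the writer
   can lap the reader in between.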

+ 119 - 0
drivers/staging/android/lowmemorykiller.c

@@ -0,0 +1,119 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+
+static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask);
+
+static struct shrinker lowmem_shrinker = {
+	.shrink = lowmem_shrink,
+	.seeks = DEFAULT_SEEKS * 16
+};
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+	0,
+	1,
+	6,
+	12,
+};
+static int lowmem_adj_size = 4;
+static int lowmem_minfree[6] = {
+	3 * 512,	/* 6MB */
+	2 * 1024,	/* 8MB */
+	4 * 1024,	/* 16MB */
+	16 * 1024,	/* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+#define lowmem_print(level, x...)			\
+	do {						\
+		if (lowmem_debug_level >= (level))	\
+			printk(x);			\
+	} while (0)
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+			 S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+			 S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
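+
+/*
+ * adj and minfree pair up by index: the first minfree[i] threshold that
+ * the number of free plus file-backed pages falls below selects adj[i] as
+ * the lowest oomkilladj value eligible for killing.  With the defaults
+ * above and 4K pages, dropping below 3*512 pages (6MB) makes any task
+ * with oomkilladj >= 0 a candidate.
+ */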
+
+static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	struct task_struct *p;
+	struct task_struct *selected = NULL;
+	int rem = 0;
+	int tasksize;
+	int i;
+	int min_adj = OOM_ADJUST_MAX + 1;
+	int selected_tasksize = 0;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+	int other_free = global_page_state(NR_FREE_PAGES) +
+				global_page_state(NR_FILE_PAGES);
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+	if (lowmem_minfree_size < array_size)
+		array_size = lowmem_minfree_size;
+	for (i = 0; i < array_size; i++) {
+		if (other_free < lowmem_minfree[i]) {
+			min_adj = lowmem_adj[i];
+			break;
+		}
+	}
+	if (nr_to_scan > 0)
+		lowmem_print(3, "lowmem_shrink %d, %x, ofree %d, ma %d\n",
+			     nr_to_scan, gfp_mask, other_free, min_adj);
+	read_lock(&tasklist_lock);
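+	/* Pick the eligible task with the highest oomkilladj, breaking
+	 * ties by largest RSS; it gets a SIGKILL below. */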
+	for_each_process(p) {
+		if (p->oomkilladj >= 0 && p->mm) {
+			tasksize = get_mm_rss(p->mm);
+			if (nr_to_scan > 0 && tasksize > 0 &&
+			    p->oomkilladj >= min_adj) {
+				if (selected == NULL ||
+				   p->oomkilladj > selected->oomkilladj ||
+				   (p->oomkilladj == selected->oomkilladj &&
+				    tasksize > selected_tasksize)) {
+					selected = p;
+					selected_tasksize = tasksize;
+					lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+					             p->pid, p->comm, p->oomkilladj, tasksize);
+				}
+			}
+			rem += tasksize;
+		}
+	}
+	if (selected != NULL) {
+		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+		             selected->pid, selected->comm,
+		             selected->oomkilladj, selected_tasksize);
+		force_sig(SIGKILL, selected);
+		rem -= selected_tasksize;
+	}
+	lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
+		     nr_to_scan, gfp_mask, rem);
+	read_unlock(&tasklist_lock);
+	return rem;
+}
+
+static int __init lowmem_init(void)
+{
+	register_shrinker(&lowmem_shrinker);
+	return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+	unregister_shrinker(&lowmem_shrinker);
+}
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+

+ 395 - 0
drivers/staging/android/ram_console.c

@@ -0,0 +1,395 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+	uint32_t    sig;
+	uint32_t    start;
+	uint32_t    size;
+	uint8_t     data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+	ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
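+
+/*
+ * With error correction enabled, the tail of the shared buffer holds the
+ * Reed-Solomon parity data: one ECC_SIZE record per ECC_BLOCK_SIZE block
+ * of log data, plus one final record protecting the buffer header (see
+ * ram_console_init below).
+ */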
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+	int i;
+	uint16_t par[ECC_SIZE];
+	/* Initialize the parity buffer */
+	memset(par, 0, sizeof(par));
+	encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+	for (i = 0; i < ECC_SIZE; i++)
+		ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+	int i;
+	uint16_t par[ECC_SIZE];
+	for (i = 0; i < ECC_SIZE; i++)
+		par[i] = ecc[i];
+	return decode_rs8(ram_console_rs_decoder, data, par, len,
+				NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+	struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+	uint8_t *block;
+	uint8_t *par;
+	int size = ECC_BLOCK_SIZE;
+#endif
+	memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+	par = ram_console_par_buffer +
+	      (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+	do {
+		if (block + ECC_BLOCK_SIZE > buffer_end)
+			size = buffer_end - block;
+		ram_console_encode_rs8(block, size, par);
+		block += ECC_BLOCK_SIZE;
+		par += ECC_SIZE;
+	} while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	struct ram_console_buffer *buffer = ram_console_buffer;
+	uint8_t *par;
+	par = ram_console_par_buffer +
+	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+	ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+	int rem;
+	struct ram_console_buffer *buffer = ram_console_buffer;
+
+	if (count > ram_console_buffer_size) {
+		s += count - ram_console_buffer_size;
+		count = ram_console_buffer_size;
+	}
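+	/* Wrap around: write what still fits at the end of the buffer,
+	 * then continue from offset 0. */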
+	rem = ram_console_buffer_size - buffer->start;
+	if (rem < count) {
+		ram_console_update(s, rem);
+		s += rem;
+		count -= rem;
+		buffer->start = 0;
+		buffer->size = ram_console_buffer_size;
+	}
+	ram_console_update(s, count);
+
+	buffer->start += count;
+	if (buffer->size < ram_console_buffer_size)
+		buffer->size += count;
+	ram_console_update_header();
+}
+
+static struct console ram_console = {
+	.name	= "ram",
+	.write	= ram_console_write,
+	.flags	= CON_PRINTBUFFER | CON_ENABLED,
+	.index	= -1,
+};
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
+{
+	size_t old_log_size = buffer->size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	uint8_t *block;
+	uint8_t *par;
+	char strbuf[80];
+	int strbuf_len;
+
+	block = buffer->data;
+	par = ram_console_par_buffer;
+	while (block < buffer->data + buffer->size) {
+		int numerr;
+		int size = ECC_BLOCK_SIZE;
+		if (block + size > buffer->data + ram_console_buffer_size)
+			size = buffer->data + ram_console_buffer_size - block;
+		numerr = ram_console_decode_rs8(block, size, par);
+		if (numerr > 0) {
+#if 0
+			printk(KERN_INFO "ram_console: error in block %p, %d\n",
+			       block, numerr);
+#endif
+			ram_console_corrected_bytes += numerr;
+		} else if (numerr < 0) {
+#if 0
+			printk(KERN_INFO "ram_console: uncorrectable error in "
+			       "block %p\n", block);
+#endif
+			ram_console_bad_blocks++;
+		}
+		block += ECC_BLOCK_SIZE;
+		par += ECC_SIZE;
+	}
+	if (ram_console_corrected_bytes || ram_console_bad_blocks)
+		strbuf_len = snprintf(strbuf, sizeof(strbuf),
+			"\n%d Corrected bytes, %d unrecoverable blocks\n",
+			ram_console_corrected_bytes, ram_console_bad_blocks);
+	else
+		strbuf_len = snprintf(strbuf, sizeof(strbuf),
+				      "\nNo errors detected\n");
+	if (strbuf_len >= sizeof(strbuf))
+		strbuf_len = sizeof(strbuf) - 1;
+	old_log_size += strbuf_len;
+#endif
+
+	if (dest == NULL) {
+		dest = kmalloc(old_log_size, GFP_KERNEL);
+		if (dest == NULL) {
+			printk(KERN_ERR
+			       "ram_console: failed to allocate buffer\n");
+			return;
+		}
+	}
+
+	ram_console_old_log = dest;
+	ram_console_old_log_size = old_log_size;
+	memcpy(ram_console_old_log,
+	       &buffer->data[buffer->start], buffer->size - buffer->start);
+	memcpy(ram_console_old_log + buffer->size - buffer->start,
+	       &buffer->data[0], buffer->start);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	memcpy(ram_console_old_log + old_log_size - strbuf_len,
+	       strbuf, strbuf_len);
+#endif
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+				   size_t buffer_size, char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	int numerr;
+	uint8_t *par;
+#endif
+	ram_console_buffer = buffer;
+	ram_console_buffer_size =
+		buffer_size - sizeof(struct ram_console_buffer);
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+						ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+	ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+
+	/* first consecutive root is 0
+	 * primitive element to generate roots = 1
+	 */
+	ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+	if (ram_console_rs_decoder == NULL) {
+		printk(KERN_INFO "ram_console: init_rs failed\n");
+		return 0;
+	}
+
+	ram_console_corrected_bytes = 0;
+	ram_console_bad_blocks = 0;
+
+	par = ram_console_par_buffer +
+	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+	numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+	if (numerr > 0) {
+		printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+		ram_console_corrected_bytes += numerr;
+	} else if (numerr < 0) {
+		printk(KERN_INFO
+		       "ram_console: uncorrectable error in header\n");
+		ram_console_bad_blocks++;
+	}
+#endif
+
+	if (buffer->sig == RAM_CONSOLE_SIG) {
+		if (buffer->size > ram_console_buffer_size
+		    || buffer->start > buffer->size)
+			printk(KERN_INFO "ram_console: found existing invalid "
+			       "buffer, size %d, start %d\n",
+			       buffer->size, buffer->start);
+		else {
+			printk(KERN_INFO "ram_console: found existing buffer, "
+			       "size %d, start %d\n",
+			       buffer->size, buffer->start);
+			ram_console_save_old(buffer, old_buf);
+		}
+	} else {
+		printk(KERN_INFO "ram_console: no valid data in buffer "
+		       "(sig = 0x%08x)\n", buffer->sig);
+	}
+
+	buffer->sig = RAM_CONSOLE_SIG;
+	buffer->start = 0;
+	buffer->size = 0;
+
+	register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+	console_verbose();
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+	return ram_console_init((struct ram_console_buffer *)
+		CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+		CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+		ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+	struct resource *res = pdev->resource;
+	size_t start;
+	size_t buffer_size;
+	void *buffer;
+
+	if (res == NULL || pdev->num_resources != 1 ||
+	    !(res->flags & IORESOURCE_MEM)) {
+		printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+		       "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+		return -ENXIO;
+	}
+	buffer_size = res->end - res->start + 1;
+	start = res->start;
+	printk(KERN_INFO "ram_console: got buffer at %x, size %x\n",
+	       start, buffer_size);
+	buffer = ioremap(res->start, buffer_size);
+	if (buffer == NULL) {
+		printk(KERN_ERR "ram_console: failed to map memory\n");
+		return -ENOMEM;
+	}
+
+	return ram_console_init(buffer, buffer_size, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+	.probe = ram_console_driver_probe,
+	.driver		= {
+		.name	= "ram_console",
+	},
+};
+
+static int __init ram_console_module_init(void)
+{
+	int err;
+	err = platform_driver_register(&ram_console_driver);
+	return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+				    size_t len, loff_t *offset)
+{
+	loff_t pos = *offset;
+	ssize_t count;
+
+	if (pos >= ram_console_old_log_size)
+		return 0;
+
+	count = min(len, (size_t)(ram_console_old_log_size - pos));
+	if (copy_to_user(buf, ram_console_old_log + pos, count))
+		return -EFAULT;
+
+	*offset += count;
+	return count;
+}
+
+static struct file_operations ram_console_file_ops = {
+	.owner = THIS_MODULE,
+	.read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	if (ram_console_old_log == NULL)
+		return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+	ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+	if (ram_console_old_log == NULL) {
+		printk(KERN_ERR
+		       "ram_console: failed to allocate buffer for old log\n");
+		ram_console_old_log_size = 0;
+		return 0;
+	}
+	memcpy(ram_console_old_log,
+	       ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+	entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+	if (!entry) {
+		printk(KERN_ERR "ram_console: failed to create proc entry\n");
+		kfree(ram_console_old_log);
+		ram_console_old_log = NULL;
+		return 0;
+	}
+
+	entry->proc_fops = &ram_console_file_ops;
+	entry->size = ram_console_old_log_size;
+	return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+module_init(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+

+ 177 - 0
drivers/staging/android/timed_gpio.c

@@ -0,0 +1,177 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_gpio.h"
+
+
+static struct class *timed_gpio_class;
+
+struct timed_gpio_data {
+	struct device *dev;
+	struct hrtimer timer;
+	spinlock_t lock;
+	unsigned	gpio;
+	int		max_timeout;
+	u8		active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+	struct timed_gpio_data *gpio_data = container_of(timer, struct timed_gpio_data, timer);
+
+	gpio_direction_output(gpio_data->gpio, gpio_data->active_low ? 1 : 0);
+	return HRTIMER_NORESTART;
+}
+
+static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
+	int remaining;
+
+	if (hrtimer_active(&gpio_data->timer)) {
+		ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
+		struct timeval t = ktime_to_timeval(r);
+
+		remaining = t.tv_sec * 1000 + t.tv_usec / 1000;
+	} else {
+		remaining = 0;
+	}
+
+	return sprintf(buf, "%d\n", remaining);
+}
+
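+/*
+ * Writing a value in milliseconds (clamped to max_timeout) drives the GPIO
+ * to its active level for that long; the hrtimer then restores the inactive
+ * level.  Writing 0 cancels any pending pulse immediately.
+ */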
+static ssize_t gpio_enable_store(
+		struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct timed_gpio_data *gpio_data = dev_get_drvdata(dev);
+	int value;
+	unsigned long	flags;
+
+	sscanf(buf, "%d", &value);
+
+	spin_lock_irqsave(&gpio_data->lock, flags);
+
+	/* cancel previous timer and set GPIO according to value */
+	hrtimer_cancel(&gpio_data->timer);
+	gpio_direction_output(gpio_data->gpio, gpio_data->active_low ? !value : !!value);
+
+	if (value > 0) {
+		if (value > gpio_data->max_timeout)
+			value = gpio_data->max_timeout;
+
+		hrtimer_start(&gpio_data->timer,
+						ktime_set(value / 1000, (value % 1000) * 1000000),
+						HRTIMER_MODE_REL);
+	}
+
+	spin_unlock_irqrestore(&gpio_data->lock, flags);
+
+	return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, gpio_enable_show, gpio_enable_store);
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio *cur_gpio;
+	struct timed_gpio_data *gpio_data, *gpio_dat;
+	int i, ret = 0;
+
+	if (!pdata)
+		return -EBUSY;
+
+	gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios, GFP_KERNEL);
+	if (!gpio_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		cur_gpio = &pdata->gpios[i];
+		gpio_dat = &gpio_data[i];
+
+		hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		gpio_dat->timer.function = gpio_timer_func;
+		spin_lock_init(&gpio_dat->lock);
+
+		gpio_dat->gpio = cur_gpio->gpio;
+		gpio_dat->max_timeout = cur_gpio->max_timeout;
+		gpio_dat->active_low = cur_gpio->active_low;
+		gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+
+		gpio_dat->dev = device_create(timed_gpio_class, &pdev->dev, 0,
+					      NULL, "%s", cur_gpio->name);
+		if (unlikely(IS_ERR(gpio_dat->dev)))
+			return PTR_ERR(gpio_dat->dev);
+
+		dev_set_drvdata(gpio_dat->dev, gpio_dat);
+		ret = device_create_file(gpio_dat->dev, &dev_attr_enable);
+		if (ret)
+			return ret;
+	}
+
+	platform_set_drvdata(pdev, gpio_data);
+
+	return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		device_remove_file(gpio_data[i].dev, &dev_attr_enable);
+		device_unregister(gpio_data[i].dev);
+	}
+
+	kfree(gpio_data);
+
+	return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+	.probe		= timed_gpio_probe,
+	.remove		= timed_gpio_remove,
+	.driver		= {
+		.name		= "timed-gpio",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init timed_gpio_init(void)
+{
+	timed_gpio_class = class_create(THIS_MODULE, "timed_output");
+	if (IS_ERR(timed_gpio_class))
+		return PTR_ERR(timed_gpio_class);
+	return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+	class_destroy(timed_gpio_class);
+	platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
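
   Once a board registers a "timed-gpio" platform device, each GPIO named in
   its platform data appears under the "timed_output" class created above. A
   hypothetical usage sketch, assuming the platform data names a GPIO
   "vibrator":

   echo 1000 > /sys/class/timed_output/vibrator/enable    (pulse for 1000 ms)
   cat /sys/class/timed_output/vibrator/enable            (ms remaining)
   echo 0 > /sys/class/timed_output/vibrator/enable       (cancel immediately)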

+ 31 - 0
drivers/staging/android/timed_gpio.h

@@ -0,0 +1,31 @@
+/* include/linux/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+struct timed_gpio {
+	const char	*name;
+	unsigned	gpio;
+	int		max_timeout;
+	u8		active_low;
+};
+
+struct timed_gpio_platform_data {
+	int		num_gpios;
+	struct timed_gpio *gpios;
+};
+
+#endif

+ 6 - 0
drivers/staging/asus_oled/Kconfig

@@ -0,0 +1,6 @@
+config ASUS_OLED
+	tristate "Asus OLED driver"
+	depends on USB
+	default n
+	---help---
+	  Enable support for the OLED display present in some Asus laptops.

+ 1 - 0
drivers/staging/asus_oled/Makefile

@@ -0,0 +1 @@
+obj-$(CONFIG_ASUS_OLED)		+= asus_oled.o

+ 156 - 0
drivers/staging/asus_oled/README

@@ -0,0 +1,156 @@
+
+    Driver for the Asus OLED display present in some Asus laptops.
+
+    The code of this driver is based on the 'asusoled' program taken from
+    https://launchpad.net/asusoled/. I just wanted to have a simple
+    kernel driver for controlling this device, but I didn't know how
+    to do that. Now I know ;) Also, that program cannot be used
+    while usbhid is loaded, which means no USB mouse/keyboard while
+    controlling the OLED display :(
+
+    It has been tested on an Asus G1 and didn't cause any problems,
+    but I don't guarantee that it won't do anything wrong :)
+
+    It can (and probably does) have errors. It is usable
+    in my case, and I hope others will find it useful too!
+
+*******
+
+Building the module
+
+   To build the module you need kernel 2.6 include files and some C compiler.
+
+   Just run:
+   make
+   make install (as a root)
+
+   It will (hopefully) build the module and install it in
+   /lib/modules/`uname -r`/extra/asus_oled.ko.
+
+   To load it just use:
+   modprobe asus_oled
+
+   You can check whether it has detected your OLED display by looking at the dmesg output.
+   There should be something like this:
+   asus-oled 2-7:1.0: Attached Asus OLED device
+
+   If it doesn't find your display, you can try removing usbhid module.
+   If you add asus_oled into the list of modules loaded during system boot
+   before usbhid, it will work even when usbhid is present.
+
+   If it still doesn't detect your hardware, check lsusb output.
+   There should be a line similar to this:
+   Bus 002 Device 005: ID 0b05:1726 ASUSTek Computer, Inc.
+
+   If you don't see any line with '0b05:1726', it means that you have a different
+   type of hardware that is not detected (it may or may not work, but the driver
+   only knows about the '0b05:1726' device).
+
+*******
+
+Configuration
+
+   There is only one option: start_off.
+   You can use it by: 'modprobe asus_oled start_off=1', or by adding this
+   line to /etc/modprobe.conf:
+   options asus_oled start_off=1
+
+   With this option provided, the asus_oled driver will switch off the display
+   when it is detected and attached. It is a nice feature for just switching off
+   the 'ASUS' logo. If you don't use the display, it is probably a good idea to
+   switch it off, to protect the OLEDs from wearing out.
+
+*******
+
+Usage
+
+   This module can be controlled with two special files:
+   /sys/class/asus_oled/oled_N/enabled
+   /sys/class/asus_oled/oled_N/picture
+
+   (N is the device number, the first, and probably the only, has number 1,
+    so it is /sys/class/asus_oled/oled_1/enabled
+    and /sys/class/asus_oled/oled_1/picture)
+
+   The 'enabled' file is for reading and writing, 'picture' is write-only.
+
+   You can write 0 or 1 to the 'enabled' file, which will switch
+   the display off and on. Reading from this file will tell you the last
+   status set, either 0 or 1. By default it is 1, so if the device was set to
+   'off' and the computer was rebooted without being powered off, this file will
+   contain the wrong value - because the device is off, but it hasn't been
+   disabled this boot and is assumed to be on...
+
+   To the 'picture' file you write pictures to be displayed by the OLED device.
+   The format of the file:
+   <M:WxH>
+   00001110010111000
+   00010101010101010
+   ....
+
+   The first line is a configuration parameter. Meaning of the fields in <M:WxH>:
+   M - picture mode. It can be 's' for static pictures,
+       'r' for rolling pictures, or 'f' for flashing pictures.
+   W - width of the picture. May be between 1 and 1792
+   H - height of the picture. May be between 1 and 32
+
+   For example <s:128x32> means static picture, 128 pixels long and 32 pixels high.
+
+   The physical size of the display is 128x32 pixels. Static and flashing pictures
+   can't be larger than that (actually they can, but only part of them will be displayed ;) )
+
+   If the picture is smaller than 128x32 it will be centered. Rolling pictures
+   wider than 128 pixels will be centered too, unless their width is a multiple
+   of 128. Vertically they will be centered just like static pictures, if their
+   height is smaller than 32.
+
+   Flashing pictures will be centered horizontally if their width < 128, but they
+   are centered vertically in a different way. If their height < 16, they will be
+   centered in the upper half of the display (rows 0-15). This is because only
+   the first half of flashing pictures is used for flashing. When a picture with
+   height = 32 is displayed in flashing mode, its upper 16 rows will be flashing
+   in the upper half of the display, and the lower half will be empty. After a
+   few seconds the upper part will stop flashing (but that part of the picture
+   will remain there), and the lower half of the display will start displaying
+   the lower half of the picture in rolling mode, unless it is empty, or the
+   picture was small enough to fit in the upper part. This is not my idea; it is
+   just the way the Asus display works ;)
+   So if you need just flashing, use at most a 128x16 picture. If you need
+   flashing and rolling, use the whole size of the display.
+
+   Lines following the first (configuration) line are picture data. Each '1' means
+   that the pixel is lit, and '0' means that it is not. You can also use '#' as ON,
+   and ' ' (space) as OFF. Empty lines and all other characters are ignored.
+
+   It is possible to write everything in one line <M:WxH>01010101010101010...,
+   and W*H characters will be used. If there are not enough characters, nothing
+   will be displayed. However, the 'line mode' is easier to read (and write), and
+   it also lets you omit parts of the data. Whenever an end-of-line character is
+   found, but the line is not W characters long, it is assumed that all missing
+   characters are equal to the last character in the line.
+
+   The following line represents '0', '1' and lots of '0's, depending on the
+   width of the picture provided in the configuration data:
+   010
+
+   So if you need an empty line, it is sufficient to write a line with only one
+   '0' in it. The same works with '1' (or ' ' and '#').
+
+   If there is too much data in the file, it will be ignored. If you are not sure
+   how many characters you are missing, you can add a few lines with a single
+   zero in each of them.
+
+   There are some example pictures in .txt format that can be used as follows:
+   cat foo.txt > /sys/class/asus_oled/oled_1/picture
+
+   If the display is switched off, you also need to run:
+   echo 1 > /sys/class/asus_oled/oled_1/enabled
+   To switch it off, just use:
+   echo 0 > /sys/class/asus_oled/oled_1/enabled
+
+
+*******
+
+   For any additional info please have a look at http://lapsus.berlios.de/asus_oled.html
+
+
+
+   Jakub Schmidtke (sjakub@gmail.com)
+

+ 10 - 0
drivers/staging/asus_oled/TODO

@@ -0,0 +1,10 @@
+TODO:
+	- checkpatch.pl cleanups
+	- sparse fixes
+	- audit the userspace interface
+		- sysfs vs. char?
+	- Documentation/ABI/ needs to be added
+	- put the sample .txt files and README file somewhere.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Cc: Jakub Schmidtke <sjakub@gmail.com>

+ 745 - 0
drivers/staging/asus_oled/asus_oled.c

@@ -0,0 +1,745 @@
+/*
+ *  Asus OLED USB driver
+ *
+ *  Copyright (C) 2007,2008 Jakub Schmidtke (sjakub@gmail.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ *
+ *  This module is based on usbled and asus-laptop modules.
+ *
+ *
+ *  Asus OLED support is based on asusoled program taken from
+ *  https://launchpad.net/asusoled/.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+
+#define ASUS_OLED_VERSION		"0.04-dev"
+#define ASUS_OLED_NAME			"asus-oled"
+#define ASUS_OLED_UNDERSCORE_NAME	"asus_oled"
+
+#define ASUS_OLED_ERROR			"Asus OLED Display Error: "
+
+#define ASUS_OLED_STATIC		's'
+#define ASUS_OLED_ROLL			'r'
+#define ASUS_OLED_FLASH			'f'
+
+#define ASUS_OLED_MAX_WIDTH		1792
+#define ASUS_OLED_DISP_HEIGHT		32
+#define ASUS_OLED_PACKET_BUF_SIZE	256
+
+MODULE_AUTHOR("Jakub Schmidtke, sjakub@gmail.com");
+MODULE_DESCRIPTION("Asus OLED Driver v" ASUS_OLED_VERSION);
+MODULE_LICENSE("GPL");
+
+static struct class *oled_class;
+static int oled_num;
+
+static uint start_off;
+
+module_param(start_off, uint, 0644);
+
+MODULE_PARM_DESC(start_off, "Set to 1 to switch off OLED display after it is attached");
+
+typedef enum {
+	PACK_MODE_G1,
+	PACK_MODE_G50,
+	PACK_MODE_LAST
+} oled_pack_mode_t;
+
+struct oled_dev_desc_str {
+	uint16_t		idVendor;
+	uint16_t		idProduct;
+	uint16_t		devWidth; // width of display
+	oled_pack_mode_t	packMode; // formula to be used while packing the picture
+	const char		*devDesc;
+};
+
+/* table of devices that work with this driver */
+static struct usb_device_id id_table [] = {
+	{ USB_DEVICE(0x0b05, 0x1726) }, // Asus G1/G2 (and variants)
+	{ USB_DEVICE(0x0b05, 0x175b) }, // Asus G50V (and possibly others - G70? G71?)
+	{ },
+};
+
+/* parameters of specific devices */
+static struct oled_dev_desc_str oled_dev_desc_table [] = {
+	{ 0x0b05, 0x1726, 128, PACK_MODE_G1, "G1/G2" },
+	{ 0x0b05, 0x175b, 256, PACK_MODE_G50, "G50" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE (usb, id_table);
+
+#define SETUP_PACKET_HEADER(packet, val1, val2, val3, val4, val5, val6, val7) \
+	do {					\
+		memset(packet, 0, sizeof(struct asus_oled_header));		\
+		packet->header.magic1 = 0x55;		\
+		packet->header.magic2 = 0xaa;		\
+		packet->header.flags = val1;		\
+		packet->header.value3 = val2;		\
+		packet->header.buffer1 = val3;		\
+		packet->header.buffer2 = val4;		\
+		packet->header.value6 = val5;		\
+		packet->header.value7 = val6;		\
+		packet->header.value8 = val7;		\
+	} while (0)
+
+struct asus_oled_header {
+	uint8_t		magic1;
+	uint8_t		magic2;
+	uint8_t		flags;
+	uint8_t		value3;
+	uint8_t		buffer1;
+	uint8_t		buffer2;
+	uint8_t		value6;
+	uint8_t		value7;
+	uint8_t		value8;
+	uint8_t		padding2[7];
+} __attribute((packed));
+
+struct asus_oled_packet {
+	struct asus_oled_header		header;
+	uint8_t				bitmap[ASUS_OLED_PACKET_BUF_SIZE];
+} __attribute((packed));
+
+struct asus_oled_dev {
+	struct usb_device *	udev;
+	uint8_t			pic_mode;
+	uint16_t		dev_width;
+	oled_pack_mode_t	pack_mode;
+	size_t			height;
+	size_t			width;
+	size_t			x_shift;
+	size_t			y_shift;
+	size_t			buf_offs;
+	uint8_t			last_val;
+	size_t			buf_size;
+	char			*buf;
+	uint8_t			enabled;
+	struct device		*dev;
+};
+
+static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
+{
+	int a;
+	int retval;
+	int act_len;
+	struct asus_oled_packet * packet;
+
+	packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
+
+	if (!packet) {
+		dev_err(&odev->udev->dev, "out of memory\n");
+		return;
+	}
+
+	SETUP_PACKET_HEADER(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00);
+
+	if (enabl) packet->bitmap[0] = 0xaf;
+	else packet->bitmap[0] = 0xae;
+
+	for (a=0; a<1; a++) {
+		retval = usb_bulk_msg(odev->udev,
+			usb_sndbulkpipe(odev->udev, 2),
+			packet,
+			sizeof(struct asus_oled_header) + 1,
+			&act_len,
+			-1);
+
+		if (retval)
+			dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
+	}
+
+	odev->enabled = enabl;
+
+	kfree(packet);
+}
+
+static ssize_t set_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct asus_oled_dev *odev = usb_get_intfdata(intf);
+	int temp = simple_strtoul(buf, NULL, 10);
+
+	enable_oled(odev, temp);
+
+	return count;
+}
+
+static ssize_t class_set_enabled(struct device *device, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct asus_oled_dev *odev = (struct asus_oled_dev *) dev_get_drvdata(device);
+
+	int temp = simple_strtoul(buf, NULL, 10);
+
+	enable_oled(odev, temp);
+
+	return count;
+}
+
+static ssize_t get_enabled(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct asus_oled_dev *odev = usb_get_intfdata(intf);
+
+	return sprintf(buf, "%d\n", odev->enabled);
+}
+
+static ssize_t class_get_enabled(struct device *device, struct device_attribute *attr, char *buf)
+{
+	struct asus_oled_dev *odev = (struct asus_oled_dev *) dev_get_drvdata(device);
+
+	return sprintf(buf, "%d\n", odev->enabled);
+}
+
+static void send_packets(struct usb_device *udev, struct asus_oled_packet *packet,
+	char *buf, uint8_t p_type, size_t p_num)
+{
+	size_t i;
+	int act_len;
+
+	for (i = 0; i < p_num; i++) {
+		int retval;
+
+		switch (p_type) {
+			case ASUS_OLED_ROLL:
+				SETUP_PACKET_HEADER(packet, 0x40, 0x80, p_num, i + 1, 0x00, 0x01, 0xff);
+			break;
+			case ASUS_OLED_STATIC:
+				SETUP_PACKET_HEADER(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x01, 0x00);
+			break;
+			case ASUS_OLED_FLASH:
+				SETUP_PACKET_HEADER(packet, 0x10 + i, 0x80, 0x01, 0x01, 0x00, 0x00, 0xff);
+			break;
+		}
+
+		memcpy(packet->bitmap, buf + (ASUS_OLED_PACKET_BUF_SIZE*i), ASUS_OLED_PACKET_BUF_SIZE);
+
+		retval = usb_bulk_msg(udev,
+			usb_sndctrlpipe(udev, 2),
+			packet,
+			sizeof(struct asus_oled_packet),
+			&act_len,
+			-1);
+
+		if (retval)
+			dev_dbg(&udev->dev, "retval = %d\n", retval);
+	}
+}
+
+static void send_packet(struct usb_device *udev, struct asus_oled_packet *packet, size_t offset, size_t len, char *buf, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint8_t b6){
+	int retval;
+	int act_len;
+
+	SETUP_PACKET_HEADER(packet, b1, b2, b3, b4, b5, b6, 0x00);
+	memcpy(packet->bitmap, buf + offset, len);
+
+	retval = usb_bulk_msg(udev,
+			usb_sndctrlpipe(udev, 2),
+			packet,
+			sizeof(struct asus_oled_packet),
+			&act_len,
+			-1);
+
+	if (retval)
+		dev_dbg(&udev->dev, "retval = %d\n", retval);
+}
+
+
+static void send_packets_g50(struct usb_device *udev, struct asus_oled_packet *packet, char *buf)
+{
+	send_packet(udev, packet,     0, 0x100, buf, 0x10, 0x00, 0x02, 0x01, 0x00, 0x01);
+	send_packet(udev, packet, 0x100, 0x080, buf, 0x10, 0x00, 0x02, 0x02, 0x80, 0x00);
+
+	send_packet(udev, packet, 0x180, 0x100, buf, 0x11, 0x00, 0x03, 0x01, 0x00, 0x01);
+	send_packet(udev, packet, 0x280, 0x100, buf, 0x11, 0x00, 0x03, 0x02, 0x00, 0x01);
+	send_packet(udev, packet, 0x380, 0x080, buf, 0x11, 0x00, 0x03, 0x03, 0x80, 0x00);
+}
+
+
+static void send_data(struct asus_oled_dev *odev)
+{
+	size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE;
+	struct asus_oled_packet * packet;
+
+	packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
+
+	if (!packet) {
+		dev_err(&odev->udev->dev, "out of memory\n");
+		return;
+	}
+
+	if (odev->pack_mode == PACK_MODE_G1) {
+		/* When sending roll-mode data, the display updates only the
+		 * first packet. I have no idea why, but when a static picture
+		 * is sent just before the rolling picture, everything works
+		 * fine. */
+		if (odev->pic_mode == ASUS_OLED_ROLL)
+			send_packets(odev->udev, packet, odev->buf, ASUS_OLED_STATIC, 2);
+
+		/* Only ROLL mode can use more than 2 packets. */
+		if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2)
+			packet_num = 2;
+
+		send_packets(odev->udev, packet, odev->buf, odev->pic_mode, packet_num);
+	} else if (odev->pack_mode == PACK_MODE_G50) {
+		send_packets_g50(odev->udev, packet, odev->buf);
+	}
+
+	kfree(packet);
+}
+
+static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
+{
+	while (count-- > 0) {
+		if (val) {
+			size_t x = odev->buf_offs % odev->width;
+			size_t y = odev->buf_offs / odev->width;
+			size_t i;
+
+			x += odev->x_shift;
+			y += odev->y_shift;
+
+			switch(odev->pack_mode)
+			{
+				case PACK_MODE_G1:
+					// i = (x/128)*640 + 127 - x + (y/8)*128;
+					// This one for 128 is the same, but might be better for different widths?
+					i = (x/odev->dev_width)*640 + odev->dev_width - 1 - x + (y/8)*odev->dev_width;
+				break;
+
+				case PACK_MODE_G50:
+					i =  (odev->dev_width - 1 - x)/8 + y*odev->dev_width/8;
+				break;
+
+				default:
+					i = 0;
+					printk(ASUS_OLED_ERROR "Unknown OLED Pack Mode: %d!\n", odev->pack_mode);
+				break;
+			}
+
+			if (i >= odev->buf_size) {
+				printk(ASUS_OLED_ERROR "Buffer overflow! Report a bug in the driver: offs: %d >= %d i: %d (x: %d y: %d)\n",
+					(int) odev->buf_offs, (int) odev->buf_size, (int) i, (int) x, (int) y);
+				return -EIO;
+			}
+
+			switch (odev->pack_mode)
+			{
+				case PACK_MODE_G1:
+					odev->buf[i] &= ~(1<<(y%8));
+				break;
+
+				case PACK_MODE_G50:
+					odev->buf[i] &= ~(1<<(x%8));
+				break;
+
+				default:
+					// cannot get here; stops gcc complaining
+				;
+			}
+		}
+
+		odev->last_val = val;
+		odev->buf_offs++;
+	}
+
+	return 0;
+}
+
+static ssize_t odev_set_picture(struct asus_oled_dev *odev, const char *buf, size_t count)
+{
+	size_t offs = 0, max_offs;
+
+	if (count < 1) return 0;
+
+	if (tolower(buf[0]) == 'b'){
+	    // binary mode, set the entire memory
+
+	    size_t i;
+
+	    odev->buf_size = (odev->dev_width * ASUS_OLED_DISP_HEIGHT) / 8;
+
+	    if (odev->buf)
+		    kfree(odev->buf);
+	    odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
+	    if (odev->buf == NULL) {
+		    odev->buf_size = 0;
+		    printk(ASUS_OLED_ERROR "Out of memory!\n");
+		    return -ENOMEM;
+	    }
+
+	    memset(odev->buf, 0xff, odev->buf_size);
+
+	    for (i=1; i < count && i<=32*32; i++){
+		odev->buf[i-1] = buf[i];
+		odev->buf_offs = i-1;
+	    }
+
+	    odev->width=odev->dev_width / 8;
+	    odev->height=ASUS_OLED_DISP_HEIGHT;
+	    odev->x_shift=0;
+	    odev->y_shift=0;
+	    odev->last_val=0;
+
+	    send_data(odev);
+
+	    return count;
+	}
+
+	if (buf[0] == '<') {
+		size_t i;
+		size_t w = 0, h = 0;
+		size_t w_mem, h_mem;
+
+		if (count < 10 || buf[2] != ':') {
+			goto error_header;
+		}
+
+		switch(tolower(buf[1])) {
+			case ASUS_OLED_STATIC:
+			case ASUS_OLED_ROLL:
+			case ASUS_OLED_FLASH:
+				odev->pic_mode = buf[1];
+				break;
+			default:
+				printk(ASUS_OLED_ERROR "Wrong picture mode: '%c'.\n", buf[1]);
+				return -EIO;
+				break;
+		}
+
+		for (i = 3; i < count; ++i) {
+			if (buf[i] >= '0' && buf[i] <= '9') {
+				w = 10*w + (buf[i] - '0');
+
+				if (w > ASUS_OLED_MAX_WIDTH) goto error_width;
+			}
+			else if (tolower(buf[i]) == 'x') break;
+			else goto error_width;
+		}
+
+		for (++i; i < count; ++i) {
+			if (buf[i] >= '0' && buf[i] <= '9') {
+				h = 10*h + (buf[i] - '0');
+
+				if (h > ASUS_OLED_DISP_HEIGHT) goto error_height;
+			}
+			else if (tolower(buf[i]) == '>') break;
+			else goto error_height;
+		}
+
+		if (w < 1 || w > ASUS_OLED_MAX_WIDTH) goto error_width;
+
+		if (h < 1 || h > ASUS_OLED_DISP_HEIGHT) goto error_height;
+
+		if (i >= count || buf[i] != '>') goto error_header;
+
+		offs = i+1;
+
+		if (w % (odev->dev_width) != 0)
+			w_mem = (w/(odev->dev_width) + 1)*(odev->dev_width);
+		else
+			w_mem = w;
+
+		if (h < ASUS_OLED_DISP_HEIGHT)
+			h_mem = ASUS_OLED_DISP_HEIGHT;
+		else
+			h_mem = h;
+
+		odev->buf_size = w_mem * h_mem / 8;
+
+		if (odev->buf) kfree(odev->buf);
+		odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
+
+		if (odev->buf == NULL) {
+			odev->buf_size = 0;
+			printk(ASUS_OLED_ERROR "Out of memory!\n");
+			return -ENOMEM;
+		}
+
+		memset(odev->buf, 0xff, odev->buf_size);
+
+		odev->buf_offs = 0;
+		odev->width = w;
+		odev->height = h;
+		odev->x_shift = 0;
+		odev->y_shift = 0;
+		odev->last_val = 0;
+
+		if (odev->pic_mode == ASUS_OLED_FLASH) {
+			if (h < ASUS_OLED_DISP_HEIGHT/2)
+				odev->y_shift = (ASUS_OLED_DISP_HEIGHT/2 - h)/2;
+		}
+		else {
+			if (h < ASUS_OLED_DISP_HEIGHT)
+				odev->y_shift = (ASUS_OLED_DISP_HEIGHT - h)/2;
+		}
+
+		if (w < (odev->dev_width))
+			odev->x_shift = ((odev->dev_width) - w)/2;
+	}
+
+	max_offs = odev->width * odev->height;
+
+	while (offs < count && odev->buf_offs < max_offs) {
+		int ret;
+
+		if (buf[offs] == '1' || buf[offs] == '#') {
+			if ( (ret = append_values(odev, 1, 1)) < 0) return ret;
+		}
+		else if (buf[offs] == '0' || buf[offs] == ' ') {
+			if ( (ret = append_values(odev, 0, 1)) < 0) return ret;
+		}
+		else if (buf[offs] == '\n') {
+			/* New line detected. Let's assume that all characters
+			 * till the end of the line were equal to the last
+			 * character in this line. */
+			if (odev->buf_offs % odev->width != 0)
+				if ( (ret = append_values(odev, odev->last_val,
+				      odev->width - (odev->buf_offs % odev->width))) < 0) return ret;
+		}
+
+		offs++;
+	}
+
+	if (odev->buf_offs >= max_offs) send_data(odev);
+
+	return count;
+
+error_width:
+	printk(ASUS_OLED_ERROR "Wrong picture width specified.\n");
+	return -EIO;
+
+error_height:
+	printk(ASUS_OLED_ERROR "Wrong picture height specified.\n");
+	return -EIO;
+
+error_header:
+	printk(ASUS_OLED_ERROR "Wrong picture header.\n");
+	return -EIO;
+}
+
+static ssize_t set_picture(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+
+	return odev_set_picture(usb_get_intfdata(intf), buf, count);
+}
+
+static ssize_t class_set_picture(struct device *device, struct device_attribute *attr, const char *buf, size_t count)
+{
+	return odev_set_picture((struct asus_oled_dev *) dev_get_drvdata(device), buf, count);
+}
+
+#define ASUS_OLED_DEVICE_ATTR(_file)		dev_attr_asus_oled_##_file
+
+static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO, get_enabled, set_enabled);
+static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
+
+static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO, class_get_enabled, class_set_enabled);
+static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
+
+static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(interface);
+	struct asus_oled_dev *odev = NULL;
+	int retval = -ENOMEM;
+	uint16_t dev_width = 0;
+	oled_pack_mode_t pack_mode = PACK_MODE_LAST;
+	const struct oled_dev_desc_str * dev_desc = oled_dev_desc_table;
+	const char *desc = 0;
+
+	if (id == 0) {
+		// Even possible? Just to make sure...
+		dev_err(&interface->dev, "No usb_device_id provided!\n");
+		return -ENODEV;
+	}
+
+	for (; dev_desc->idVendor; dev_desc++)
+	{
+		if (dev_desc->idVendor == id->idVendor
+			&& dev_desc->idProduct == id->idProduct)
+		{
+			dev_width = dev_desc->devWidth;
+			desc = dev_desc->devDesc;
+			pack_mode = dev_desc->packMode;
+			break;
+		}
+	}
+
+	if ( !desc || dev_width < 1 || pack_mode == PACK_MODE_LAST) {
+		dev_err(&interface->dev, "Missing or incomplete device description!\n");
+		return -ENODEV;
+	}
+
+	odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL);
+
+	if (odev == NULL) {
+		dev_err(&interface->dev, "Out of memory\n");
+		return -ENOMEM;
+	}
+
+	odev->udev = usb_get_dev(udev);
+	odev->pic_mode = ASUS_OLED_STATIC;
+	odev->dev_width = dev_width;
+	odev->pack_mode = pack_mode;
+	odev->height = 0;
+	odev->width = 0;
+	odev->x_shift = 0;
+	odev->y_shift = 0;
+	odev->buf_offs = 0;
+	odev->buf_size = 0;
+	odev->last_val = 0;
+	odev->buf = NULL;
+	odev->enabled = 1;
+	odev->dev = 0;
+
+	usb_set_intfdata (interface, odev);
+
+	if ((retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled)))) {
+		goto err_files;
+	}
+
+	if ((retval = device_create_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture)))) {
+		goto err_files;
+	}
+
+	odev->dev = device_create(oled_class, &interface->dev, MKDEV(0,0),
+				NULL,"oled_%d", ++oled_num);
+
+	if (IS_ERR(odev->dev)) {
+		retval = PTR_ERR(odev->dev);
+		goto err_files;
+	}
+
+	dev_set_drvdata(odev->dev, odev);
+
+	if ( (retval = device_create_file(odev->dev, &dev_attr_enabled))) {
+		goto err_class_enabled;
+	}
+
+	if ( (retval = device_create_file(odev->dev, &dev_attr_picture))) {
+		goto err_class_picture;
+	}
+
+	dev_info(&interface->dev, "Attached Asus OLED device: %s [width %u, pack_mode %d]\n", desc, odev->dev_width, odev->pack_mode);
+
+	if (start_off)
+		enable_oled(odev, 0);
+
+	return 0;
+
+err_class_picture:
+	device_remove_file(odev->dev, &dev_attr_picture);
+
+err_class_enabled:
+	device_remove_file(odev->dev, &dev_attr_enabled);
+	device_unregister(odev->dev);
+
+err_files:
+	device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
+	device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));
+
+	usb_set_intfdata (interface, NULL);
+	usb_put_dev(odev->udev);
+	kfree(odev);
+
+	return retval;
+}
+
+static void asus_oled_disconnect(struct usb_interface *interface)
+{
+	struct asus_oled_dev *odev;
+
+	odev = usb_get_intfdata (interface);
+	usb_set_intfdata (interface, NULL);
+
+	device_remove_file(odev->dev, &dev_attr_picture);
+	device_remove_file(odev->dev, &dev_attr_enabled);
+	device_unregister(odev->dev);
+
+	device_remove_file(&interface->dev, & ASUS_OLED_DEVICE_ATTR(picture));
+	device_remove_file(&interface->dev, & ASUS_OLED_DEVICE_ATTR(enabled));
+
+	usb_put_dev(odev->udev);
+
+	if (odev->buf) kfree(odev->buf);
+
+	kfree(odev);
+
+	dev_info(&interface->dev, "Disconnected Asus OLED device\n");
+}
+
+static struct usb_driver oled_driver = {
+	.name =		ASUS_OLED_NAME,
+	.probe =	asus_oled_probe,
+	.disconnect =	asus_oled_disconnect,
+	.id_table =	id_table,
+};
+
+static ssize_t version_show(struct class *dev, char *buf)
+{
+	return sprintf(buf, ASUS_OLED_UNDERSCORE_NAME " %s\n", ASUS_OLED_VERSION);
+}
+
+static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+
+static int __init asus_oled_init(void)
+{
+	int retval = 0;
+	oled_class = class_create(THIS_MODULE, ASUS_OLED_UNDERSCORE_NAME);
+
+	if (IS_ERR(oled_class)) {
+		err("Error creating " ASUS_OLED_UNDERSCORE_NAME " class");
+		return PTR_ERR(oled_class);
+	}
+
+	if ((retval = class_create_file(oled_class, &class_attr_version))) {
+		err("Error creating class version file");
+		goto error;
+	}
+
+	retval = usb_register(&oled_driver);
+
+	if (retval) {
+		err("usb_register failed. Error number %d", retval);
+		goto error;
+	}
+
+	return retval;
+
+error:
+	class_destroy(oled_class);
+	return retval;
+}
+
+static void __exit asus_oled_exit(void)
+{
+	usb_deregister(&oled_driver);
+
+	class_remove_file(oled_class, &class_attr_version);
+	class_destroy(oled_class);
+}
+
+module_init (asus_oled_init);
+module_exit (asus_oled_exit);
+

+ 33 - 0
drivers/staging/asus_oled/linux.txt

@@ -0,0 +1,33 @@
+<s:74x32>
+0
+0
+00000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000
+01111111111000000000000000000000000000000000000000000000000000000000000000
+00011111100000000000000111000000000000000000000000000000000000000000000000
+00001111000000000000000111000000000000000000000000000000000000000000000000
+00001111000000000000000111000000000000000000000000000000000000000000000000
+00001111000000000000000000000000000000000000000000000000000000000000000000
+00001111000000000000000000000000000000000000000000000000000000000000000000
+00001111000000000000011100001111111111100000111110011111100011111101111000
+00001111000000000000111110000011111000111000111110000111100001111000110000
+00001111000000000001101110000011111000111000001111000111100000111100100000
+00001111000000000001001110000011110000111100001111000111100000111101100000
+00001111000000000100001110000011110000111100001111000111100000011111000000
+00001111000000000100011110000011110000111100001111000111100000001111000000
+00001111000000000100011110000011110000111100001111000111100000001111000000
+00001111000000000100011100100011110000111100001111000111100000001111100000
+00001111000000001100111100100011110000111100001111000111100000001111110000
+00001111000000001100111101100011110000111100001111000111100000011011110000
+00001111000000011100111101000011110000111100001111000111100000010001111000
+00011111000001111100111011000011110000111100001111001111100000110000111100
+11111111111111111100011110001111111011111110000111110111111011111011111110
+00000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000
+0
+0
+0
+0

+ 18 - 0
drivers/staging/asus_oled/linux_f.txt

@@ -0,0 +1,18 @@
+<f:128x16>
+00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
+00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
+00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
+

+ 33 - 0
drivers/staging/asus_oled/linux_fr.txt

@@ -0,0 +1,33 @@
+<f:128x32>
+00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
+00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
+00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
+00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
+00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
+00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
+00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
+00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
+00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
+00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000

+ 33 - 0
drivers/staging/asus_oled/tux.txt

@@ -0,0 +1,33 @@
+<s:32x32>
+00000000000001111111000000000000
+0000000000001       100000000000
+000000000001         10000000000
+000000000001         10000000000
+000000000001         10000000000
+000000000001 1  111  10000000000
+000000000001    1 1   1000000000
+000000000001  111     1000000000
+000000000001 111111   1000000000
+000000000001 111111   1000000000
+000000000001    1 1    100000000
+00000000001      11    100000000
+00000000001 11111111    10000000
+0000000001  11111111     1000000
+000000001   111111111    1000000
+000000001  1111111111     100000
+00000001   11111111111    100000
+00000001  111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+000000011 11111111111      10000
+000011 11  11111111111    100000
+0001  1111  111111111111111 1000
+001 1111111  11111111111111 1000
+001 1111111  1111111  111111 100
+001 11111111 111111   1111111 10
+001 11111111          11111  100
+001  1111111          111  11100
+000111   111   11111  11  100000
+000000111   111111111    1000000

+ 33 - 0
drivers/staging/asus_oled/tux_r.txt

@@ -0,0 +1,33 @@
+<r:32x32>
+00000000000001111111000000000000
+0000000000001       100000000000
+000000000001         10000000000
+000000000001         10000000000
+000000000001         10000000000
+000000000001 1  111  10000000000
+000000000001    1 1   1000000000
+000000000001  111     1000000000
+000000000001 111111   1000000000
+000000000001 111111   1000000000
+000000000001    1 1    100000000
+00000000001      11    100000000
+00000000001 11111111    10000000
+0000000001  11111111     1000000
+000000001   111111111    1000000
+000000001  1111111111     100000
+00000001   11111111111    100000
+00000001  111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+0000001   111111111111     10000
+000000011 11111111111      10000
+000011 11  11111111111    100000
+0001  1111  111111111111111 1000
+001 1111111  11111111111111 1000
+001 1111111  1111111  111111 100
+001 11111111 111111   1111111 10
+001 11111111          11111  100
+001  1111111          111  11100
+000111   111   11111  11  100000
+000000111   111111111    1000000

+ 33 - 0
drivers/staging/asus_oled/tux_r2.txt

@@ -0,0 +1,33 @@
+<r:256x32>
+000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000000000001       1000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001         1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001         1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001         1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001 1  111  1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001    1 1   100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001  111     100000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001 111111   100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001 111111   100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000000001    1 1    10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000001      11    10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000000001 11111111    1000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000011100001111111111100000111110011111100011111101111000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000000001  11111111     100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000111110000011111000111000111110000111100001111000110000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000001   111111111    100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001101110000011111000111000001111000111100000111100100000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000001  1111111111     10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001001110000011110000111100001111000111100000111101100000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000001   11111111111    10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100001110000011110000111100001111000111100000011111000000000000000000000000000000000
+00000000000000000000000000000000000000000000000000000001  111111111111     10000000000000000000000000000000000000000000000000000000000000000000000000000000000011110000000001000111100000111100001111000011110001111000000011110000000
+0000000000000000000000000000000000000000000000000000001   111111111111     1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011110000011110000111100001111000111100000001111000000
+0000000000000000000000000000000000000000000000000000001   111111111111     1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011100100011110000111100001111000111100000001111100000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000001   111111111111     1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111100100011110000111100001111000111100000001111110000000000000000000000000000000
+0000000000000000000000000000000000000000000000000000001   111111111111     1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111101100011110000111100001111000111100000011011110000000000000000000000000000000
+000000000000000000000000000000000000000000000000000000011 11111111111      1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000011100111101000011110000111100001111000111100000010001111000000000000000000000000000000
+000000000000000000000000000000000000000000000000000011 11  11111111111    10000000000000000000000000000000000000000000000000000000000000000000000000000000000011111000001111100111011000011110000111100001111001111100000110000111100000000000000000000000000000
+0000000000000000000000000000000000000000000000000001  1111  111111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100011110001111111011111110000111110111111011111011111110000000000000000000000000000
+000000000000000000000000000000000000000000000000001 1111111  11111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000001 1111111  1111111  111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000001 11111111 111111   1111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000001 11111111          11111  1000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000001  1111111          111  111000000000000000000000000000000000000000000000000000
+000000000000000000000000000000000000000000000000000111   111   11111  11  10
+000000000000000000000000000000000000000000000000000000111   111111111    10

+ 33 - 0
drivers/staging/asus_oled/zig.txt

@@ -0,0 +1,33 @@
+<r:128x32>
+10000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000001
+01000000000000000000000000000000000000000000000000000000000000100100000000000000000000000000000000000000000000000000000000000010
+00100000000000000000000000000000000000000000000000000000000001000010000000000000000000000000000000000000000000000000000000000100
+00010000000000000000000000000000000000000000000000000000000010000001000000000000000000000000000000000000000000000000000000001000
+00001000000000000000000000000000000000000000000000000000000100000000100000000000000000000000000000000000000000000000000000010000
+00000100000000000000000000000000000000000000000000000000001000000000010000000000000000000000000000000000000000000000000000100000
+00000010000000000000000000000000000000000000000000000000010000000000001000000000000000000000000000000000000000000000000001000000
+00000001000000000000000000000000000000000000000000000000100000000000000100000000000000000000000000000000000000000000000010000000
+00000000100000000000000000000000000000000000000000000001000000000000000010000000000000000000000000000000000000000000000100000000
+00000000010000000000000000000000000000000000000000000010000000000000000001000000000000000000000000000000000000000000001000000000
+00000000001000000000000000000000000000000000000000000100000000000000000000100000000000000000000000000000000000000000010000000000
+00000000000100000000000000000000000000000000000000001000000000000000000000010000000000000000000000000000000000000000100000000000
+00000000000010000000000000000000000000000000000000010000000000000000000000001000000000000000000000000000000000000001000000000000
+00000000000001000000000000000000000000000000000000100000000000000000000000000100000000000000000000000000000000000010000000000000
+00000000000000100000000000000000000000000000000001000000000000000000000000000010000000000000000000000000000000000100000000000000
+00000000000000010000000000000000000000000000000010000000000000000000000000000001000000000000000000000000000000001000000000000000
+00000000000000001000000000000000000000000000000100000000000000000000000000000000100000000000000000000000000000010000000000000000
+00000000000000000100000000000000000000000000001000000000000000000000000000000000010000000000000000000000000000100000000000000000
+00000000000000000010000000000000000000000000010000000000000000000000000000000000001000000000000000000000000001000000000000000000
+00000000000000000001000000000000000000000000100000000000000000000000000000000000000100000000000000000000000010000000000000000000
+00000000000000000000100000000000000000000001000000000000000000000000000000000000000010000000000000000000000100000000000000000000
+00000000000000000000010000000000000000000010000000000000000000000000000000000000000001000000000000000000001000000000000000000000
+00000000000000000000001000000000000000000100000000000000000000000000000000000000000000100000000000000000010000000000000000000000
+00000000000000000000000100000000000000001000000000000000000000000000000000000000000000010000000000000000100000000000000000000000
+00000000000000000000000010000000000000010000000000000000000000000000000000000000000000001000000000000001000000000000000000000000
+00000000000000000000000001000000000000100000000000000000000000000000000000000000000000000100000000000010000000000000000000000000
+00000000000000000000000000100000000001000000000000000000000000000000000000000000000000000010000000000100000000000000000000000000
+00000000000000000000000000010000000010000000000000000000000000000000000000000000000000000001000000001000000000000000000000000000
+00000000000000000000000000001000000100000000000000000000000000000000000000000000000000000000100000010000000000000000000000000000
+00000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000
+00000000000000000000000000000010010000000000000000000000000000000000000000000000000000000000001001000000000000000000000000000000
+00000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000

+ 1 - 1
drivers/staging/at76_usb/Kconfig

@@ -1,6 +1,6 @@
 config USB_ATMEL
 	tristate "Atmel at76c503/at76c505/at76c505a USB cards"
-	depends on WLAN_80211 && USB
+	depends on MAC80211 && WLAN_80211 && USB
 	default N
 	select FW_LOADER
 	---help---

Diff file is too large
+ 248 - 417
drivers/staging/at76_usb/at76_usb.c


+ 58 - 169
drivers/staging/at76_usb/at76_usb.h

@@ -34,23 +34,6 @@ enum board_type {
 	BOARD_505AMX = 8
 };
 
-/* our private ioctl's */
-/* preamble length (0 - long, 1 - short, 2 - auto) */
-#define AT76_SET_SHORT_PREAMBLE		(SIOCIWFIRSTPRIV + 0)
-#define AT76_GET_SHORT_PREAMBLE		(SIOCIWFIRSTPRIV + 1)
-/* which debug channels are enabled */
-#define AT76_SET_DEBUG			(SIOCIWFIRSTPRIV + 2)
-#define AT76_GET_DEBUG			(SIOCIWFIRSTPRIV + 3)
-/* power save mode (incl. the Atmel proprietary smart save mode) */
-#define AT76_SET_POWERSAVE_MODE		(SIOCIWFIRSTPRIV + 4)
-#define AT76_GET_POWERSAVE_MODE		(SIOCIWFIRSTPRIV + 5)
-/* min and max channel times for scan */
-#define AT76_SET_SCAN_TIMES		(SIOCIWFIRSTPRIV + 6)
-#define AT76_GET_SCAN_TIMES		(SIOCIWFIRSTPRIV + 7)
-/* scan mode (0 - active, 1 - passive) */
-#define AT76_SET_SCAN_MODE		(SIOCIWFIRSTPRIV + 8)
-#define AT76_GET_SCAN_MODE		(SIOCIWFIRSTPRIV + 9)
-
 #define CMD_STATUS_IDLE				0x00
 #define CMD_STATUS_COMPLETE			0x01
 #define CMD_STATUS_UNKNOWN			0x02
@@ -82,6 +65,7 @@ enum board_type {
 #define MIB_MAC			0x03
 #define MIB_MAC_MGMT		0x05
 #define MIB_MAC_WEP		0x06
+#define MIB_MAC_ENCRYPTION	0x06
 #define MIB_PHY			0x07
 #define MIB_FW_VERSION		0x08
 #define MIB_MDOMAIN		0x09
@@ -106,6 +90,26 @@ enum board_type {
 #define AT76_PM_ON		2
 #define AT76_PM_SMART		3
 
+/* cipher values for encryption keys */
+#define CIPHER_NONE		0	/* this value is only a guess */
+#define CIPHER_WEP64		1
+#define CIPHER_TKIP		2
+#define CIPHER_CCMP		3
+#define CIPHER_CCX		4	/* for consistency's sake only */
+#define CIPHER_WEP128		5
+
+/* bit flags key types for encryption keys */
+#define KEY_PAIRWISE		2
+#define KEY_TX			4
+
+#define CIPHER_KEYS		(4)
+#define CIPHER_KEY_LEN		(40)
+
+struct key_config {
+	u8 cipher;
+	u8 keylen;
+};
+
 struct hwcfg_r505 {
 	u8 cr39_values[14];
 	u8 reserved1[14];
@@ -147,6 +151,9 @@ union at76_hwcfg {
 
 #define WEP_SMALL_KEY_LEN	(40 / 8)
 #define WEP_LARGE_KEY_LEN	(104 / 8)
+#define WEP_KEYS		(4)
+
+
 
 struct at76_card_config {
 	u8 exclude_unencrypted;
@@ -161,7 +168,7 @@ struct at76_card_config {
 	u8 privacy_invoked;
 	u8 wep_default_key_id;	/* 0..3 */
 	u8 current_ssid[32];
-	u8 wep_default_key_value[4][WEP_KEY_LEN];
+	u8 wep_default_key_value[4][WEP_LARGE_KEY_LEN];
 	u8 ssid_len;
 	u8 short_preamble;
 	__le16 beacon_period;
@@ -186,7 +193,7 @@ struct at76_rx_buffer {
 	u8 link_quality;
 	u8 noise_level;
 	__le32 rx_time;
-	u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
+	u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
 } __attribute__((packed));
 
 /* Length of Atmel-specific Tx header before 802.11 frame */
@@ -196,8 +203,11 @@ struct at76_tx_buffer {
 	__le16 wlength;
 	u8 tx_rate;
 	u8 padding;
-	u8 reserved[4];
-	u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
+	u8 key_id;
+	u8 cipher_type;
+	u8 cipher_length;
+	u8 reserved;
+	u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
 } __attribute__((packed));
 
 /* defines for scan_type below */
@@ -244,6 +254,7 @@ struct set_mib_buffer {
 		u8 byte;
 		__le16 word;
 		u8 addr[ETH_ALEN];
+		u8 data[256];	/* we need more space for mib_mac_encryption */
 	} data;
 } __attribute__((packed));
 
@@ -317,10 +328,24 @@ struct mib_mac_wep {
 	u8 exclude_unencrypted;
 	__le32 wep_icv_error_count;
 	__le32 wep_excluded_count;
-	u8 wep_default_keyvalue[WEP_KEYS][WEP_KEY_LEN];
+	u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
 	u8 encryption_level;	/* 1 for 40bit, 2 for 104bit encryption */
 } __attribute__((packed));
 
+struct mib_mac_encryption {
+	u8 cipher_default_keyvalue[CIPHER_KEYS][CIPHER_KEY_LEN];
+	u8 tkip_bssid[6];
+	u8 privacy_invoked;
+	u8 cipher_default_key_id;
+	u8 cipher_default_group_key_id;
+	u8 exclude_unencrypted;
+	u8 wep_encryption_type;
+	u8 ckip_key_permutation;	/* bool */
+	__le32 wep_icv_error_count;
+	__le32 wep_excluded_count;
+	u8 key_rsc[CIPHER_KEYS][8];
+} __attribute__((packed));
+
 struct mib_phy {
 	__le32 ed_threshold;
 
@@ -364,16 +389,6 @@ struct at76_fw_header {
 	__le32 ext_fw_len;	/* external firmware image length */
 } __attribute__((packed));
 
-enum mac_state {
-	MAC_INIT,
-	MAC_SCANNING,
-	MAC_AUTH,
-	MAC_ASSOC,
-	MAC_JOINING,
-	MAC_CONNECTED,
-	MAC_OWN_IBSS
-};
-
 /* a description of a regulatory domain and the allowed channels */
 struct reg_domain {
 	u16 code;
@@ -381,47 +396,6 @@ struct reg_domain {
 	u32 channel_map;	/* if bit N is set, channel (N+1) is allowed */
 };
 
-/* how long do we keep a (I)BSS in the bss_list in jiffies
-   this should be long enough for the user to retrieve the table
-   (by iwlist ?) after the device started, because all entries from
-   other channels than the one the device locks on get removed, too */
-#define BSS_LIST_TIMEOUT	(120 * HZ)
-/* struct to store BSS info found during scan */
-#define BSS_LIST_MAX_RATE_LEN	32	/* 32 rates should be enough ... */
-
-struct bss_info {
-	struct list_head list;
-
-	u8 bssid[ETH_ALEN];	/* bssid */
-	u8 ssid[IW_ESSID_MAX_SIZE];	/* essid */
-	u8 ssid_len;		/* length of ssid above */
-	u8 channel;
-	u16 capa;		/* BSS capabilities */
-	u16 beacon_interval;	/* beacon interval, Kus (1024 microseconds) */
-	u8 rates[BSS_LIST_MAX_RATE_LEN];	/* supported rates in units of
-						   500 kbps, ORed with 0x80 for
-						   basic rates */
-	u8 rates_len;
-
-	/* quality of received beacon */
-	u8 rssi;
-	u8 link_qual;
-	u8 noise_level;
-
-	unsigned long last_rx;	/* time (jiffies) of last beacon received */
-};
-
-/* a rx data buffer to collect rx fragments */
-struct rx_data_buf {
-	u8 sender[ETH_ALEN];	/* sender address */
-	u16 seqnr;		/* sequence number */
-	u16 fragnr;		/* last fragment received */
-	unsigned long last_rx;	/* jiffies of last rx */
-	struct sk_buff *skb;	/* == NULL if entry is free */
-};
-
-#define NR_RX_DATA_BUF		8
-
 /* Data for one loaded firmware file */
 struct fwentry {
 	const char *const fwname;
@@ -438,11 +412,9 @@ struct fwentry {
 
 struct at76_priv {
 	struct usb_device *udev;	/* USB device pointer */
-	struct net_device *netdev;	/* net device pointer */
-	struct net_device_stats stats;	/* net device stats */
-	struct iw_statistics wstats;	/* wireless stats */
 
 	struct sk_buff *rx_skb;	/* skbuff for receiving data */
+	struct sk_buff *tx_skb;	/* skbuff for transmitting data */
 	void *bulk_out_buffer;	/* buffer for sending data */
 
 	struct urb *tx_urb;	/* URB for sending data */
@@ -454,26 +426,17 @@ struct at76_priv {
 	struct mutex mtx;	/* locks this structure */
 
 	/* work queues */
-	struct work_struct work_assoc_done;
-	struct work_struct work_join;
-	struct work_struct work_new_bss;
-	struct work_struct work_start_scan;
 	struct work_struct work_set_promisc;
 	struct work_struct work_submit_rx;
-	struct delayed_work dwork_restart;
-	struct delayed_work dwork_get_scan;
-	struct delayed_work dwork_beacon;
-	struct delayed_work dwork_auth;
-	struct delayed_work dwork_assoc;
+	struct delayed_work dwork_hw_scan;
 
 	struct tasklet_struct rx_tasklet;
 
 	/* the WEP stuff */
 	int wep_enabled;	/* 1 if WEP is enabled */
 	int wep_key_id;		/* key id to be used */
-	u8 wep_keys[WEP_KEYS][WEP_KEY_LEN];	/* the four WEP keys,
-						   5 or 13 bytes are used */
-	u8 wep_keys_len[WEP_KEYS];	/* the length of the above keys */
+	u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN];	/* WEP keys */
+	u8 wep_keys_len[WEP_KEYS];	/* length of WEP keys */
 
 	int channel;
 	int iw_mode;
@@ -495,44 +458,13 @@ struct at76_priv {
 	int scan_mode;		/* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
 	int scan_need_any;	/* if set, need to scan for any ESSID */
 
-	/* the list we got from scanning */
-	spinlock_t bss_list_spinlock;	/* protects bss_list operations */
-	struct list_head bss_list;	/* list of BSS we got beacons from */
-	struct timer_list bss_list_timer;	/* timer to purge old entries
-						   from bss_list */
-	struct bss_info *curr_bss;	/* current BSS */
 	u16 assoc_id;		/* current association ID, if associated */
 
-	u8 wanted_bssid[ETH_ALEN];
-	int wanted_bssid_valid;	/* != 0 if wanted_bssid is to be used */
-
-	/* some data for infrastructure mode only */
-	spinlock_t mgmt_spinlock;	/* this spinlock protects access to
-					   next_mgmt_bulk */
-
-	struct at76_tx_buffer *next_mgmt_bulk;	/* pending management msg to
-						   send via bulk out */
-	enum mac_state mac_state;
-	enum {
-		SCAN_IDLE,
-		SCAN_IN_PROGRESS,
-		SCAN_COMPLETED
-	} scan_state;
-	time_t last_scan;
-
-	int retries;		/* remaining retries in case of timeout when
-				 * sending AuthReq or AssocReq */
 	u8 pm_mode;		/* power management mode */
 	u32 pm_period;		/* power management period in microseconds */
 
 	struct reg_domain const *domain;	/* reg domain description */
 
-	/* iwspy support */
-	spinlock_t spy_spinlock;
-	struct iw_spy_data spy_data;
-
-	struct iw_public_data wireless_data;
-
 	/* These fields contain HW config provided by the device (not all of
 	 * these fields are used by all board types) */
 	u8 mac_addr[ETH_ALEN];
@@ -540,9 +472,6 @@ struct at76_priv {
 
 	struct at76_card_config card_config;
 
-	/* store rx fragments until complete */
-	struct rx_data_buf rx_data[NR_RX_DATA_BUF];
-
 	enum board_type board_type;
 	struct mib_fw_version fw_version;
 
@@ -550,58 +479,20 @@ struct at76_priv {
 	unsigned int netdev_registered:1;
 	struct set_mib_buffer mib_buf;	/* global buffer for set_mib calls */
 
-	/* beacon counting */
 	int beacon_period;	/* period of mgmt beacons, Kus */
-	int beacons_received;
-	unsigned long beacons_last_qual;	/* time we restarted counting
-						   beacons */
-};
 
-struct at76_rx_radiotap {
-	struct ieee80211_radiotap_header rt_hdr;
-	__le64 rt_tsft;
-	u8 rt_flags;
-	u8 rt_rate;
-	s8 rt_signal;
-	s8 rt_noise;
-};
-
-#define AT76_RX_RADIOTAP_PRESENT		  \
-	((1 << IEEE80211_RADIOTAP_TSFT)		| \
-	(1 << IEEE80211_RADIOTAP_FLAGS)		| \
-	(1 << IEEE80211_RADIOTAP_RATE)		| \
-	(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL)	| \
-	(1 << IEEE80211_RADIOTAP_DB_ANTNOISE))
-
-#define BEACON_MAX_DATA_LENGTH	1500
-
-/* the maximum size of an AssocReq packet */
-#define ASSOCREQ_MAX_SIZE \
-  (AT76_TX_HDRLEN + sizeof(struct ieee80211_assoc_request) + \
-   1 + 1 + IW_ESSID_MAX_SIZE + 1 + 1 + 4)
-
-/* for shared secret auth, add the challenge text size */
-#define AUTH_FRAME_SIZE (AT76_TX_HDRLEN + sizeof(struct ieee80211_auth))
+	struct ieee80211_hw *hw;
+	int mac80211_registered;
 
-/* Maximal number of AuthReq retries */
-#define AUTH_RETRIES		3
-
-/* Maximal number of AssocReq retries */
-#define ASSOC_RETRIES		3
-
-/* Beacon timeout in managed mode when we are connected */
-#define BEACON_TIMEOUT		(10 * HZ)
-
-/* Timeout for authentication response */
-#define AUTH_TIMEOUT		(1 * HZ)
+	struct key_config keys[4];	/* installed key types */
+	u8 default_pairwise_key;
+	u8 default_group_key;
+};
 
-/* Timeout for association response */
-#define ASSOC_TIMEOUT		(1 * HZ)
+#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
 
-/* Polling interval when scan is running */
 #define SCAN_POLL_INTERVAL	(HZ / 4)
 
-/* Command completion timeout */
 #define CMD_COMPLETION_TIMEOUT	(5 * HZ)
 
 #define DEF_RTS_THRESHOLD	1536
@@ -611,8 +502,6 @@ struct at76_rx_radiotap {
 #define DEF_SCAN_MIN_TIME	10
 #define DEF_SCAN_MAX_TIME	120
 
-#define MAX_RTS_THRESHOLD	(MAX_FRAG_THRESHOLD + 1)
-
 /* the max padding size for tx in bytes (see calc_padding) */
 #define MAX_PADDING_SIZE	53
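
For orientation: the removals above (private ioctls, the bss_info list, enum mac_state, the iwspy hooks) and the additions (struct ieee80211_hw *hw, mac80211_registered, dwork_hw_scan, struct key_config) mark this as a port from a self-contained softmac driver to mac80211. Below is a minimal, hedged sketch of the registration pattern such a port relies on; the function name and the ops table are illustrative, not taken from the patch.

#include <linux/usb.h>
#include <net/mac80211.h>

static int at76_example_register(struct usb_device *udev,
				 const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct at76_priv *priv;
	int ret;

	/* allocate ieee80211_hw with driver private data appended */
	hw = ieee80211_alloc_hw(sizeof(*priv), ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;	/* the at76_priv now lives behind hw */
	priv->hw = hw;

	SET_IEEE80211_DEV(hw, &udev->dev);

	/* replaces the old register_netdev()/wireless-extensions path */
	ret = ieee80211_register_hw(hw);
	if (ret)
		ieee80211_free_hw(hw);
	return ret;
}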
 

+ 7 - 0
drivers/staging/benet/Kconfig

@@ -0,0 +1,7 @@
+config BENET
+	tristate "ServerEngines 10Gb NIC - BladeEngine"
+	depends on PCI && INET
+	select INET_LRO
+	help
+	  This driver implements the NIC functionality for ServerEngines
+	  10Gb network adapter BladeEngine (EC 3210).

+ 6 - 0
drivers/staging/benet/MAINTAINERS

@@ -0,0 +1,6 @@
+SERVER ENGINES 10Gbe NIC - BLADE-ENGINE
+P:	Subbu Seetharaman
+M:	subbus@serverengines.com
+L:	netdev@vger.kernel.org
+W:	http://www.serverengines.com
+S:	Supported

+ 14 - 0
drivers/staging/benet/Makefile

@@ -0,0 +1,14 @@
+#
+# Makefile to build the network driver for ServerEngine's BladeEngine
+#
+obj-$(CONFIG_BENET) += benet.o
+
+benet-y :=  be_init.o \
+			be_int.o \
+			be_netif.o \
+			be_ethtool.o \
+			funcobj.o \
+			cq.o \
+			eq.o \
+			mpu.o \
+			eth.o

+ 6 - 0
drivers/staging/benet/TODO

@@ -0,0 +1,6 @@
+TODO:
+	- remove wrappers around common iowrite functions
+	- full netdev audit of common problems/issues
+
+Please send all patches and questions to Subbu Seetharaman
+<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>
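
The first TODO item refers to the driver's macro wrappers (PD_WRITE() and friends, visible in be_init.c below) around the kernel's common MMIO accessors. A hedged sketch of the unwrapped form follows; the offset constant is hypothetical and named here only for illustration:

#include <linux/io.h>

#define CQ_DB_OFFSET	0x120	/* hypothetical doorbell offset, not from the driver */

static inline void be_ring_cq_db_direct(void __iomem *db_va, u32 val)
{
	/* a plain iowrite32() on the ioremapped doorbell BAR */
	iowrite32(val, db_va + CQ_DB_OFFSET);
}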

+ 82 - 0
drivers/staging/benet/asyncmesg.h

@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __asyncmesg_amap_h__
+#define __asyncmesg_amap_h__
+#include "fwcmd_common.h"
+
+/* --- ASYNC_EVENT_CODES --- */
+#define ASYNC_EVENT_CODE_LINK_STATE     (1)
+#define ASYNC_EVENT_CODE_ISCSI          (2)
+
+/* --- ASYNC_LINK_STATES --- */
+#define ASYNC_EVENT_LINK_DOWN           (0)	/* Link Down on a port */
+#define ASYNC_EVENT_LINK_UP             (1)	/* Link Up on a port */
+
+/*
+ * The last 4 bytes of the async events have this common format.  It allows
+ * the driver to distinguish MCC_CQ_ENTRY structs from
+ * asynchronous events.  Both arrive on the same completion queue.  This
+ * structure also contains the common fields used to decode the async event.
+ */
+struct BE_ASYNC_EVENT_TRAILER_AMAP {
+	u8 rsvd0[8];	/* DWORD 0 */
+	u8 event_code[8];	/* DWORD 0 */
+	u8 event_type[8];	/* DWORD 0 */
+	u8 rsvd1[6];	/* DWORD 0 */
+	u8 async_event;	/* DWORD 0 */
+	u8 valid;		/* DWORD 0 */
+} __packed;
+struct ASYNC_EVENT_TRAILER_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Applicable in Initiator, Target and NIC modes.
+ * A link state async event is seen by all device drivers as soon they
+ * create an MCC ring. Thereafter, anytime the link status changes the
+ * drivers will receive a link state async event. Notifications continue to
+ * be sent until a driver destroys its MCC ring. A link down event is
+ * reported when either port loses link. A link up event is reported
+ * when either port regains link. When BE's failover mechanism is enabled, a
+ * link down on the active port causes traffic to be diverted to the standby
+ * port by the BE's ARM firmware (assuming the standby port has link). In
+ * this case, the standby port assumes the active status. Note: when link is
+ * restored on the failed port, traffic continues on the currently active
+ * port. The ARM firmware does not attempt to 'fail back' traffic to
+ * the restored port.
+ */
+struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
+	u8 port0_link_status[8];
+	u8 port1_link_status[8];
+	u8 active_port[8];
+	u8 rsvd0[8];	/* DWORD 0 */
+	u8 port0_duplex[8];
+	u8 port0_speed[8];
+	u8 port1_duplex[8];
+	u8 port1_speed[8];
+	u8 port0_fault[8];
+	u8 port1_fault[8];
+	u8 rsvd1[2][8];	/* DWORD 2 */
+	struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
+} __packed;
+struct ASYNC_EVENT_LINK_STATE_AMAP {
+	u32 dw[4];
+};
+#endif /* __asyncmesg_amap_h__ */
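
A note on the convention in these autogenerated headers: in a BE_*_AMAP struct, a member declared "u8 name[N]" stands for an N-bit field, and the actual bytes live in the companion "u32 dw[...]" struct. Conceptually (this is a hedged stand-in, not the driver's real AMAP_GET_BITS_PTR() implementation), field extraction works like this:

#include <linux/types.h>

/* Extract a bit field of the given width starting at bit_off,
 * assuming the field does not straddle a dword boundary. */
static inline u32 amap_get_bits(const u32 *dw, unsigned int bit_off,
				unsigned int width)
{
	u32 word = dw[bit_off / 32];
	unsigned int shift = bit_off % 32;
	u32 mask = (width == 32) ? ~0u : ((1u << width) - 1);

	return (word >> shift) & mask;
}

/* Example: event_code sits at bits 8..15 of the trailer dword, so
 * amap_get_bits(trailer->dw, 8, 8) recovers the ASYNC_EVENT_CODE_*. */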

+ 134 - 0
drivers/staging/benet/be_cm.h

@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __be_cm_amap_h__
+#define __be_cm_amap_h__
+#include "be_common.h"
+#include "etx_context.h"
+#include "mpu_context.h"
+
+/*
+ * --- CEV_WATERMARK_ENUM ---
+ * CQ/EQ Watermark Encodings. Encoded as number of free entries in
+ * Queue when Watermark is reached.
+ */
+#define CEV_WMARK_0        (0)	/* Watermark when Queue full */
+#define CEV_WMARK_16       (1)	/* Watermark at 16 free entries */
+#define CEV_WMARK_32       (2)	/* Watermark at 32 free entries */
+#define CEV_WMARK_48       (3)	/* Watermark at 48 free entries */
+#define CEV_WMARK_64       (4)	/* Watermark at 64 free entries */
+#define CEV_WMARK_80       (5)	/* Watermark at 80 free entries */
+#define CEV_WMARK_96       (6)	/* Watermark at 96 free entries */
+#define CEV_WMARK_112      (7)	/* Watermark at 112 free entries */
+#define CEV_WMARK_128      (8)	/* Watermark at 128 free entries */
+#define CEV_WMARK_144      (9)	/* Watermark at 144 free entries */
+#define CEV_WMARK_160      (10)	/* Watermark at 160 free entries */
+#define CEV_WMARK_176      (11)	/* Watermark at 176 free entries */
+#define CEV_WMARK_192      (12)	/* Watermark at 192 free entries */
+#define CEV_WMARK_208      (13)	/* Watermark at 208 free entries */
+#define CEV_WMARK_224      (14)	/* Watermark at 224 free entries */
+#define CEV_WMARK_240      (15)	/* Watermark at 240 free entries */
+
+/*
+ * --- CQ_CNT_ENUM ---
+ * Completion Queue Count Encodings.
+ */
+#define CEV_CQ_CNT_256                  (0)	/* CQ has 256 entries */
+#define CEV_CQ_CNT_512                  (1)	/* CQ has 512 entries */
+#define CEV_CQ_CNT_1024                 (2)	/* CQ has 1024 entries */
+
+/*
+ * --- EQ_CNT_ENUM ---
+ * Event Queue Count Encodings.
+ */
+#define CEV_EQ_CNT_256     (0)	/* EQ has 256 entries (16-byte EQEs only) */
+#define CEV_EQ_CNT_512     (1)	/* EQ has 512 entries (16-byte EQEs only) */
+#define CEV_EQ_CNT_1024    (2)	/* EQ has 1024 entries (4-byte or */
+				/* 16-byte EQEs only) */
+#define CEV_EQ_CNT_2048    (3)	/* EQ has 2048 entries (4-byte or */
+				/* 16-byte EQEs only) */
+#define CEV_EQ_CNT_4096    (4)	/* EQ has 4096 entries (4-byte EQEs only) */
+
+/*
+ * --- EQ_SIZE_ENUM ---
+ * Event Queue Entry Size Encoding.
+ */
+#define CEV_EQ_SIZE_4                   (0)	/* EQE is 4 bytes */
+#define CEV_EQ_SIZE_16                  (1)	/* EQE is 16 bytes */
+
+/*
+ * Completion Queue Context Table Entry. Contains the state of a CQ.
+ * Located in RAM within the CEV block.
+ */
+struct BE_CQ_CONTEXT_AMAP {
+	u8 Cidx[11];	/* DWORD 0 */
+	u8 Watermark[4];	/* DWORD 0 */
+	u8 NoDelay;		/* DWORD 0 */
+	u8 EPIdx[11];	/* DWORD 0 */
+	u8 Count[2];	/* DWORD 0 */
+	u8 valid;		/* DWORD 0 */
+	u8 SolEvent;	/* DWORD 0 */
+	u8 Eventable;	/* DWORD 0 */
+	u8 Pidx[11];	/* DWORD 1 */
+	u8 PD[10];		/* DWORD 1 */
+	u8 EQID[7];		/* DWORD 1 */
+	u8 Func;		/* DWORD 1 */
+	u8 WME;		/* DWORD 1 */
+	u8 Stalled;		/* DWORD 1 */
+	u8 Armed;		/* DWORD 1 */
+} __packed;
+struct CQ_CONTEXT_AMAP {
+	u32 dw[2];
+};
+
+/*
+ * Event Queue Context Table Entry. Contains the state of an EQ.
+ * Located in RAM in the CEV block.
+ */
+struct BE_EQ_CONTEXT_AMAP {
+	u8 Cidx[13];	/* DWORD 0 */
+	u8 rsvd0[2];	/* DWORD 0 */
+	u8 Func;		/* DWORD 0 */
+	u8 EPIdx[13];	/* DWORD 0 */
+	u8 valid;		/* DWORD 0 */
+	u8 rsvd1;		/* DWORD 0 */
+	u8 Size;		/* DWORD 0 */
+	u8 Pidx[13];	/* DWORD 1 */
+	u8 rsvd2[3];	/* DWORD 1 */
+	u8 PD[10];		/* DWORD 1 */
+	u8 Count[3];	/* DWORD 1 */
+	u8 SolEvent;	/* DWORD 1 */
+	u8 Stalled;		/* DWORD 1 */
+	u8 Armed;		/* DWORD 1 */
+	u8 Watermark[4];	/* DWORD 2 */
+	u8 WME;		/* DWORD 2 */
+	u8 rsvd3[3];	/* DWORD 2 */
+	u8 EventVect[6];	/* DWORD 2 */
+	u8 rsvd4[2];	/* DWORD 2 */
+	u8 Delay[8];	/* DWORD 2 */
+	u8 rsvd5[6];	/* DWORD 2 */
+	u8 TMR;		/* DWORD 2 */
+	u8 rsvd6;		/* DWORD 2 */
+	u8 rsvd7[32];	/* DWORD 3 */
+} __packed;
+struct EQ_CONTEXT_AMAP {
+	u32 dw[4];
+};
+
+#endif /* __be_cm_amap_h__ */
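
The CEV_WMARK_* encodings above follow a simple rule: the encoded value is the number of free entries divided by 16. A small hedged helper (not part of the patch) makes the mapping explicit:

#include <linux/types.h>

static inline u32 cev_wmark_encode(u32 free_entries)
{
	if (free_entries > 240)	/* CEV_WMARK_240 is the highest encoding */
		free_entries = 240;
	return free_entries / 16;	/* 0 -> CEV_WMARK_0 ... 240 -> CEV_WMARK_240 */
}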

+ 53 - 0
drivers/staging/benet/be_common.h

@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __be_common_amap_h__
+#define __be_common_amap_h__
+
+/* Physical Address. */
+struct BE_PHYS_ADDR_AMAP {
+	u8 lo[32];		/* DWORD 0 */
+	u8 hi[32];		/* DWORD 1 */
+} __packed;
+struct PHYS_ADDR_AMAP {
+	u32 dw[2];
+};
+
+/* Virtual Address. */
+struct BE_VIRT_ADDR_AMAP {
+	u8 lo[32];		/* DWORD 0 */
+	u8 hi[32];		/* DWORD 1 */
+} __packed;
+struct VIRT_ADDR_AMAP {
+	u32 dw[2];
+};
+
+/* Scatter gather element. */
+struct BE_SGE_AMAP {
+	u8 addr_hi[32];	/* DWORD 0 */
+	u8 addr_lo[32];	/* DWORD 1 */
+	u8 rsvd0[32];	/* DWORD 2 */
+	u8 len[16];		/* DWORD 3 */
+	u8 rsvd1[16];	/* DWORD 3 */
+} __packed;
+struct SGE_AMAP {
+	u32 dw[4];
+};
+
+#endif /* __be_common_amap_h__ */
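
BE_PHYS_ADDR_AMAP simply splits a 64-bit bus address into two 32-bit dwords (lo in dw[0], hi in dw[1]). Filling one from a DMA handle is a shift and a mask; the helper name below is illustrative, not from the driver:

#include <linux/types.h>

static inline void be_fill_phys_addr(struct PHYS_ADDR_AMAP *pa, u64 bus_addr)
{
	pa->dw[0] = (u32)(bus_addr & 0xffffffffu);	/* lo */
	pa->dw[1] = (u32)(bus_addr >> 32);		/* hi */
}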

+ 348 - 0
drivers/staging/benet/be_ethtool.c

@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * be_ethtool.c
+ *
+ * 	This file contains various functions that ethtool can use
+ * 	to talk to the driver and the BE H/W.
+ */
+
+#include "benet.h"
+
+#include <linux/ethtool.h>
+
+static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
+/* net_device_stats */
+	"rx_packets",
+	"tx_packets",
+	"rx_bytes",
+	"tx_bytes",
+	"rx_errors",
+	"tx_errors",
+	"rx_dropped",
+	"tx_dropped",
+	"multicast",
+	"collisions",
+	"rx_length_errors",
+	"rx_over_errors",
+	"rx_crc_errors",
+	"rx_frame_errors",
+	"rx_fifo_errors",
+	"rx_missed_errors",
+	"tx_aborted_errors",
+	"tx_carrier_errors",
+	"tx_fifo_errors",
+	"tx_heartbeat_errors",
+	"tx_window_errors",
+	"rx_compressed",
+	"tc_compressed",
+/* BE driver Stats */
+	"bes_tx_reqs",
+	"bes_tx_fails",
+	"bes_fwd_reqs",
+	"bes_tx_wrbs",
+	"bes_interrupts",
+	"bes_events",
+	"bes_tx_events",
+	"bes_rx_events",
+	"bes_tx_compl",
+	"bes_rx_compl",
+	"bes_ethrx_post_fail",
+	"bes_802_3_dropped_frames",
+	"bes_802_3_malformed_frames",
+	"bes_rx_misc_pkts",
+	"bes_eth_tx_rate",
+	"bes_eth_rx_rate",
+	"Num Packets collected",
+	"Num Times Flushed",
+};
+
+#define NET_DEV_STATS_LEN \
+	(sizeof(struct net_device_stats)/sizeof(unsigned long))
+
+#define BENET_STATS_LEN  ARRAY_SIZE(benet_gstrings_stats)
+
+static void
+be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+
+	strncpy(drvinfo->driver, be_driver_name, 32);
+	strncpy(drvinfo->version, be_drvr_ver, 32);
+	strncpy(drvinfo->fw_version, be_fw_ver, 32);
+	strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static int
+be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+
+	coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
+
+	coalesce->rx_coalesce_usecs = adapter->cur_eqd;
+	coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
+	coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
+
+	coalesce->tx_coalesce_usecs = adapter->cur_eqd;
+	coalesce->tx_coalesce_usecs_high = adapter->max_eqd;
+	coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
+
+	coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
+	coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
+
+	return 0;
+}
+
+/*
+ * This routine is used to set interrupt coalescing delay *as well as*
+ * the number of pkts to coalesce for LRO.
+ */
+static int
+be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	struct be_eq_object *eq_objectp;
+	u32 max, min, cur;
+	int status;
+
+	adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
+	if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
+		adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+	if (adapter->enable_aic == 0 &&
+		coalesce->use_adaptive_rx_coalesce == 1) {
+		/* if AIC is being turned on now, start with an EQD of 0 */
+		adapter->cur_eqd = 0;
+	}
+	adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;
+
+	/* round off to nearest multiple of 8 */
+	max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
+	min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
+	cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);
+
+	if (adapter->enable_aic) {
+		/* accept low and high if AIC is enabled */
+		if (max > MAX_EQD)
+			max = MAX_EQD;
+		if (min > max)
+			min = max;
+		adapter->max_eqd = max;
+		adapter->min_eqd = min;
+		if (adapter->cur_eqd > max)
+			adapter->cur_eqd = max;
+		if (adapter->cur_eqd < min)
+			adapter->cur_eqd = min;
+	} else {
+		/* accept specified coalesce_usecs only if AIC is disabled */
+		if (cur > MAX_EQD)
+			cur = MAX_EQD;
+		eq_objectp = &pnob->event_q_obj;
+		status =
+		    be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur,
+				       NULL, NULL, NULL);
+		if (status == BE_SUCCESS)
+			adapter->cur_eqd = cur;
+	}
+	return 0;
+}
+
+static u32 be_get_rx_csum(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	return adapter->rx_csum;
+}
+
+static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+
+	if (data)
+		adapter->rx_csum = 1;
+	else
+		adapter->rx_csum = 0;
+
+	return 0;
+}
+
+static void
+be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *benet_gstrings_stats,
+		       sizeof(benet_gstrings_stats));
+		break;
+	}
+}
+
+static int be_get_stats_count(struct net_device *netdev)
+{
+	return BENET_STATS_LEN;
+}
+
+static void
+be_get_ethtool_stats(struct net_device *netdev,
+		     struct ethtool_stats *stats, uint64_t *data)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	int i;
+
+	benet_get_stats(netdev);
+
+	for (i = 0; i < NET_DEV_STATS_LEN; i++)
+		data[i] = ((unsigned long *)&adapter->benet_stats)[i];
+
+	data[i++] = adapter->be_stat.bes_tx_reqs;
+	data[i++] = adapter->be_stat.bes_tx_fails;
+	data[i++] = adapter->be_stat.bes_fwd_reqs;
+	data[i++] = adapter->be_stat.bes_tx_wrbs;
+
+	data[i++] = adapter->be_stat.bes_ints;
+	data[i++] = adapter->be_stat.bes_events;
+	data[i++] = adapter->be_stat.bes_tx_events;
+	data[i++] = adapter->be_stat.bes_rx_events;
+	data[i++] = adapter->be_stat.bes_tx_compl;
+	data[i++] = adapter->be_stat.bes_rx_compl;
+	data[i++] = adapter->be_stat.bes_ethrx_post_fail;
+	data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
+	data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
+	data[i++] = adapter->be_stat.bes_rx_misc_pkts;
+	data[i++] = adapter->be_stat.bes_eth_tx_rate;
+	data[i++] = adapter->be_stat.bes_eth_rx_rate;
+	data[i++] = adapter->be_stat.bes_rx_coal;
+	data[i++] = adapter->be_stat.bes_rx_flush;
+}
+
+static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	ecmd->speed = SPEED_10000;
+	ecmd->duplex = DUPLEX_FULL;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	return 0;
+}
+
+/* Get the Ring parameters from the pnob */
+static void
+be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	/* Preset maximums */
+	ring->rx_max_pending = pnob->rx_q_len;
+	ring->rx_mini_max_pending = 0;	/* mini ring not supported */
+	ring->rx_jumbo_max_pending = 0;	/* jumbo ring not supported */
+	ring->tx_max_pending = pnob->tx_q_len;
+
+	/* Current hardware settings */
+	ring->rx_pending = atomic_read(&pnob->rx_q_posted);
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+	ring->tx_pending = atomic_read(&pnob->tx_q_used);
+}
+
+static void
+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	bool rxfc, txfc;
+	int status;
+
+	status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc);
+	if (status != BE_SUCCESS) {
+		dev_info(&netdev->dev, "Unable to get pause frame settings\n");
+		/* return defaults */
+		ecmd->rx_pause = 1;
+		ecmd->tx_pause = 0;
+		ecmd->autoneg = AUTONEG_ENABLE;
+		return;
+	}
+
+	if (txfc)
+		ecmd->tx_pause = 1;
+	else
+		ecmd->tx_pause = 0;
+
+	if (rxfc)
+		ecmd->rx_pause = 1;
+	else
+		ecmd->rx_pause = 0;
+
+	ecmd->autoneg = AUTONEG_ENABLE;
+}
+
+static int
+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	bool txfc, rxfc;
+	int status;
+
+	if (ecmd->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	if (ecmd->tx_pause)
+		txfc = true;
+	else
+		txfc = false;
+
+	if (ecmd->rx_pause)
+		rxfc = true;
+	else
+		rxfc = false;
+
+	status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
+	if (status != BE_SUCCESS) {
+		dev_info(&netdev->dev, "Unable to set pause frame settings\n");
+		return -1;
+	}
+	return 0;
+}
+
+struct ethtool_ops be_ethtool_ops = {
+	.get_settings = be_get_settings,
+	.get_drvinfo = be_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_coalesce = be_get_coalesce,
+	.set_coalesce = be_set_coalesce,
+	.get_ringparam = be_get_ringparam,
+	.get_pauseparam = be_get_pauseparam,
+	.set_pauseparam = be_set_pauseparam,
+	.get_rx_csum = be_get_rx_csum,
+	.set_rx_csum = be_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ethtool_op_set_tso,
+	.get_strings = be_get_strings,
+	.get_stats_count = be_get_stats_count,
+	.get_ethtool_stats = be_get_ethtool_stats,
+};
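
One detail in be_set_coalesce() above is worth spelling out: the requested EQ delays are rounded to the nearest multiple of 8 microseconds with ((x + 4) >> 3) << 3, the usual add-half-the-step-then-truncate idiom. As a standalone sketch:

/* Round x to the nearest multiple of 8: add half the step (4),
 * then clear the low three bits.  E.g. 11 -> 8, 12 -> 16, 20 -> 24. */
static inline u32 round_to_nearest_8(u32 x)
{
	return ((x + 4) >> 3) << 3;
}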

+ 1382 - 0
drivers/staging/benet/be_init.c

@@ -0,0 +1,1382 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/etherdevice.h>
+#include "benet.h"
+
+#define  DRVR_VERSION  "1.0.728"
+
+static const struct pci_device_id be_device_id_table[] = {
+	{PCI_DEVICE(0x19a2, 0x0201)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, be_device_id_table);
+
+MODULE_VERSION(DRVR_VERSION);
+
+#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version "
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION);
+MODULE_AUTHOR("ServerEngines");
+MODULE_LICENSE("GPL");
+
+static unsigned int msix = 1;
+module_param(msix, uint, S_IRUGO);
+MODULE_PARM_DESC(msix, "Use MSI-x interrupts");
+
+static unsigned int rxbuf_size = 2048;	/* Default RX frag size */
+module_param(rxbuf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");
+
+const char be_drvr_ver[] = DRVR_VERSION;
+char be_fw_ver[32];		/* F/W version filled in by be_probe */
+char be_driver_name[] = "benet";
+
+/*
+ * Number of entries in each queue.
+ */
+#define EVENT_Q_LEN		1024
+#define ETH_TXQ_LEN		2048
+#define ETH_TXCQ_LEN		1024
+#define ETH_RXQ_LEN		1024	/* Does not support any other value */
+#define ETH_UC_RXCQ_LEN		1024
+#define ETH_BC_RXCQ_LEN		256
+#define MCC_Q_LEN               64	/* total size not to exceed 8 pages */
+#define MCC_CQ_LEN              256
+
+/* Bit mask describing events of interest to be traced */
+unsigned int trace_level;
+
+static int
+init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev)
+{
+	u64 pa;
+
+	/* CSR */
+	pa = pci_resource_start(pdev, 2);
+	adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2));
+	if (adapter->csr_va == NULL)
+		return -ENOMEM;
+
+	/* Door Bell */
+	pa = pci_resource_start(pdev, 4);
+	adapter->db_va = ioremap_nocache(pa, (128 * 1024));
+	if (adapter->db_va == NULL) {
+		iounmap(adapter->csr_va);
+		return -ENOMEM;
+	}
+
+	/* PCI */
+	pa = pci_resource_start(pdev, 1);
+	adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1));
+	if (adapter->pci_va == NULL) {
+		iounmap(adapter->csr_va);
+		iounmap(adapter->db_va);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+   This function enables the interrupt corresponding to the Event
+   queue ID for the given NetObject
+*/
+void be_enable_eq_intr(struct be_net_object *pnob)
+{
+	struct CQ_DB_AMAP cqdb;
+	cqdb.dw[0] = 0;
+	AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
+	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1);
+	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
+	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
+	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
+
+/*
+   This function disables the interrupt corresponding to the Event
+   queue ID for the given NetObject
+*/
+void be_disable_eq_intr(struct be_net_object *pnob)
+{
+	struct CQ_DB_AMAP cqdb;
+	cqdb.dw[0] = 0;
+	AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
+	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0);
+	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
+	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
+	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
+
+/*
+    This function enables the interrupt from the network function
+    of the BladeEngine. Use the function be_enable_eq_intr()
+    to enable the interrupt from the event queue of only one specific
+    NetObject.
+*/
+void be_enable_intr(struct be_net_object *pnob)
+{
+	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+	u32 host_intr;
+
+	ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
+	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+							hostintr, ctrl.dw);
+	if (!host_intr) {
+		AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+			hostintr, ctrl.dw, 1);
+		PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
+			ctrl.dw[0]);
+	}
+}
+
+/*
+   This function disables the interrupt from the network function of
+   the BladeEngine.  Use the function be_disable_eq_intr() to
+   disable the interrupt from the event queue of only one specific NetObject
+*/
+void be_disable_intr(struct be_net_object *pnob)
+{
+
+	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+	u32 host_intr;
+	ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
+	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+							hostintr, ctrl.dw);
+	if (host_intr) {
+		AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr,
+			ctrl.dw, 0);
+		PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
+			ctrl.dw[0]);
+	}
+}
+
+static int be_enable_msix(struct be_adapter *adapter)
+{
+	int i, ret;
+
+	if (!msix)
+		return -1;
+
+	for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
+		adapter->msix_entries[i].entry = i;
+
+	ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+		BE_MAX_REQ_MSIX_VECTORS);
+
+	if (ret == 0)
+		adapter->msix_enabled = 1;
+	return ret;
+}
+
+static int be_register_isr(struct be_adapter *adapter,
+		struct be_net_object *pnob)
+{
+	struct net_device *netdev = pnob->netdev;
+	int intx = 0, r;
+
+	netdev->irq = adapter->pdev->irq;
+	r = be_enable_msix(adapter);
+
+	if (r == 0) {
+		r = request_irq(adapter->msix_entries[0].vector,
+				be_int, IRQF_SHARED, netdev->name, netdev);
+		if (r) {
+			printk(KERN_WARNING
+				"MSIX Request IRQ failed - Errno %d\n", r);
+			intx = 1;
+			pci_disable_msix(adapter->pdev);
+			adapter->msix_enabled = 0;
+		}
+	} else {
+		intx = 1;
+	}
+
+	if (intx) {
+		r = request_irq(netdev->irq, be_int, IRQF_SHARED,
+				netdev->name, netdev);
+		if (r) {
+			printk(KERN_WARNING
+				"INTx Request IRQ failed - Errno %d\n", r);
+			return -1;
+		}
+	}
+	adapter->isr_registered = 1;
+	return 0;
+}
+
+static void be_unregister_isr(struct be_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdevp;
+	if (adapter->isr_registered) {
+		if (adapter->msix_enabled) {
+			free_irq(adapter->msix_entries[0].vector, netdev);
+			pci_disable_msix(adapter->pdev);
+			adapter->msix_enabled = 0;
+		} else {
+			free_irq(netdev->irq, netdev);
+		}
+		adapter->isr_registered = 0;
+	}
+}
+
+/*
+    This function processes the Flush Completions that are issued by the
+    ARM F/W, when a Recv Ring is destroyed.  A flush completion is
+    identified when a Rx COmpl descriptor has the tcpcksum and udpcksum
+    set and the pktsize is 32.  These completions are received on the
+    Rx Completion Queue.
+*/
+static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob)
+{
+	struct ETH_RX_COMPL_AMAP *rxcp;
+	unsigned int i = 0;
+	while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) {
+		be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1);
+		i++;
+	}
+	return i;
+}
+
+static void be_tx_q_clean(struct be_net_object *pnob)
+{
+	while (atomic_read(&pnob->tx_q_used))
+		process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob));
+}
+
+static void be_rx_q_clean(struct be_net_object *pnob)
+{
+	if (pnob->rx_ctxt) {
+		int i;
+		struct be_rx_page_info *rx_page_info;
+		for (i = 0; i < pnob->rx_q_len; i++) {
+			rx_page_info = &(pnob->rx_page_info[i]);
+			if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
+				pci_unmap_page(pnob->adapter->pdev,
+				       pci_unmap_addr(rx_page_info, bus),
+					       pnob->rx_buf_size,
+					       PCI_DMA_FROMDEVICE);
+			}
+			if (rx_page_info->page)
+				put_page(rx_page_info->page);
+			memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		}
+		pnob->rx_pg_info_hd = 0;
+	}
+}
+
+static void be_destroy_netobj(struct be_net_object *pnob)
+{
+	int status;
+
+	if (pnob->tx_q_created) {
+		status = be_eth_sq_destroy(&pnob->tx_q_obj);
+		pnob->tx_q_created = 0;
+	}
+
+	if (pnob->rx_q_created) {
+		status = be_eth_rq_destroy(&pnob->rx_q_obj);
+		if (status != 0) {
+			status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0,
+						      NULL, NULL);
+			BUG_ON(status);
+		}
+		pnob->rx_q_created = 0;
+	}
+
+	be_process_rx_flush_cmpl(pnob);
+
+	if (pnob->tx_cq_created) {
+		status = be_cq_destroy(&pnob->tx_cq_obj);
+		pnob->tx_cq_created = 0;
+	}
+
+	if (pnob->rx_cq_created) {
+		status = be_cq_destroy(&pnob->rx_cq_obj);
+		pnob->rx_cq_created = 0;
+	}
+
+	if (pnob->mcc_q_created) {
+		status = be_mcc_ring_destroy(&pnob->mcc_q_obj);
+		pnob->mcc_q_created = 0;
+	}
+	if (pnob->mcc_cq_created) {
+		status = be_cq_destroy(&pnob->mcc_cq_obj);
+		pnob->mcc_cq_created = 0;
+	}
+
+	if (pnob->event_q_created) {
+		status = be_eq_destroy(&pnob->event_q_obj);
+		pnob->event_q_created = 0;
+	}
+	be_function_cleanup(&pnob->fn_obj);
+}
+
+/*
+ * Free all resources associated with a pnob.
+ * Called at module cleanup time as well as on any error during
+ * module init.  Some resources may be partially allocated in a NetObj.
+ */
+static void netobject_cleanup(struct be_adapter *adapter,
+			struct be_net_object *pnob)
+{
+	struct net_device *netdev = adapter->netdevp;
+
+	if (netif_running(netdev)) {
+		netif_stop_queue(netdev);
+		be_wait_nic_tx_cmplx_cmpl(pnob);
+		be_disable_eq_intr(pnob);
+	}
+
+	be_unregister_isr(adapter);
+
+	if (adapter->tasklet_started) {
+		tasklet_kill(&(adapter->sts_handler));
+		adapter->tasklet_started = 0;
+	}
+	if (pnob->fn_obj_created)
+		be_disable_intr(pnob);
+
+	if (adapter->dev_state != BE_DEV_STATE_NONE)
+		unregister_netdev(netdev);
+
+	if (pnob->fn_obj_created)
+		be_destroy_netobj(pnob);
+
+	adapter->net_obj = NULL;
+	adapter->netdevp = NULL;
+
+	be_rx_q_clean(pnob);
+	if (pnob->rx_ctxt) {
+		kfree(pnob->rx_page_info);
+		kfree(pnob->rx_ctxt);
+	}
+
+	be_tx_q_clean(pnob);
+	kfree(pnob->tx_ctxt);
+
+	if (pnob->mcc_q)
+		pci_free_consistent(adapter->pdev, pnob->mcc_q_size,
+			pnob->mcc_q, pnob->mcc_q_bus);
+
+	if (pnob->mcc_wrb_ctxt)
+		free_pages((unsigned long)pnob->mcc_wrb_ctxt,
+			   get_order(pnob->mcc_wrb_ctxt_size));
+
+	if (pnob->mcc_cq)
+		pci_free_consistent(adapter->pdev, pnob->mcc_cq_size,
+			pnob->mcc_cq, pnob->mcc_cq_bus);
+
+	if (pnob->event_q)
+		pci_free_consistent(adapter->pdev, pnob->event_q_size,
+			pnob->event_q, pnob->event_q_bus);
+
+	if (pnob->tx_cq)
+		pci_free_consistent(adapter->pdev, pnob->tx_cq_size,
+			pnob->tx_cq, pnob->tx_cq_bus);
+
+	if (pnob->tx_q)
+		pci_free_consistent(adapter->pdev, pnob->tx_q_size,
+			pnob->tx_q, pnob->tx_q_bus);
+
+	if (pnob->rx_q)
+		pci_free_consistent(adapter->pdev, pnob->rx_q_size,
+			pnob->rx_q, pnob->rx_q_bus);
+
+	if (pnob->rx_cq)
+		pci_free_consistent(adapter->pdev, pnob->rx_cq_size,
+			pnob->rx_cq, pnob->rx_cq_bus);
+
+
+	if (pnob->mb_ptr)
+		pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr,
+			pnob->mb_bus);
+
+	free_netdev(netdev);
+}
+
+
+static int be_nob_ring_alloc(struct be_adapter *adapter,
+	struct be_net_object *pnob)
+{
+	u32 size;
+
+	/* Mailbox rd; the mailbox pointer needs to be 16-byte aligned */
+	pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16;
+	pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size,
+				&pnob->mb_bus);
+	if (!pnob->mb_bus)
+		return -1;
+	memset(pnob->mb_ptr, 0, pnob->mb_size);
+	pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16);
+	pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16);
+	pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP);
+	/*
+	 * Event queue
+	 */
+	pnob->event_q_len = EVENT_Q_LEN;
+	pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP);
+	pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size,
+				&pnob->event_q_bus);
+	if (!pnob->event_q_bus)
+		return -1;
+	memset(pnob->event_q, 0, pnob->event_q_size);
+	/*
+	 * Eth TX queue
+	 */
+	pnob->tx_q_len = ETH_TXQ_LEN;
+	pnob->tx_q_port = 0;
+	pnob->tx_q_size =  pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP);
+	pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size,
+				&pnob->tx_q_bus);
+	if (!pnob->tx_q_bus)
+		return -1;
+	memset(pnob->tx_q, 0, pnob->tx_q_size);
+	/*
+	 * Eth TX Compl queue
+	 */
+	pnob->txcq_len = ETH_TXCQ_LEN;
+	pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP);
+	pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size,
+				&pnob->tx_cq_bus);
+	if (!pnob->tx_cq_bus)
+		return -1;
+	memset(pnob->tx_cq, 0, pnob->tx_cq_size);
+	/*
+	 * Eth RX queue
+	 */
+	pnob->rx_q_len = ETH_RXQ_LEN;
+	pnob->rx_q_size =  pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP);
+	pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size,
+				&pnob->rx_q_bus);
+	if (!pnob->rx_q_bus)
+		return -1;
+	memset(pnob->rx_q, 0, pnob->rx_q_size);
+	/*
+	 * Eth Unicast RX Compl queue
+	 */
+	pnob->rx_cq_len = ETH_UC_RXCQ_LEN;
+	pnob->rx_cq_size =  pnob->rx_cq_len *
+			sizeof(struct ETH_RX_COMPL_AMAP);
+	pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size,
+				&pnob->rx_cq_bus);
+	if (!pnob->rx_cq_bus)
+		return -1;
+	memset(pnob->rx_cq, 0, pnob->rx_cq_size);
+
+	/* TX resources */
+	size = pnob->tx_q_len * sizeof(void **);
+	pnob->tx_ctxt = kzalloc(size, GFP_KERNEL);
+	if (pnob->tx_ctxt == NULL)
+		return -1;
+
+	/* RX resources */
+	size = pnob->rx_q_len * sizeof(void *);
+	pnob->rx_ctxt = kzalloc(size, GFP_KERNEL);
+	if (pnob->rx_ctxt == NULL)
+		return -1;
+
+	size = (pnob->rx_q_len * sizeof(struct be_rx_page_info));
+	pnob->rx_page_info = kzalloc(size, GFP_KERNEL);
+	if (pnob->rx_page_info == NULL)
+		return -1;
+
+	adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS),
+				GFP_KERNEL);
+	if (adapter->eth_statsp == NULL)
+		return -1;
+	pnob->rx_buf_size = rxbuf_size;
+	return 0;
+}
+
+/*
+    This function initializes the be_net_object for subsequent
+    network operations.
+
+    Before calling this function, the driver must have allocated
+    space for the NetObject structure, initialized the structure,
+    allocated DMAable memory for all the network queues that form
+    part of the NetObject and populated the start address (virtual)
+    and number of entries allocated for each queue in the NetObject
+    structure.
+
+    The driver must also have allocated memory to hold the
+    mailbox structure (MCC_MAILBOX) and posted the physical address,
+    virtual address and the size of the mailbox memory in
+    NetObj.mb_rd.  This structure is used by BECLIB for
+    initial communication with the embedded MCC processor.  BECLIB
+    uses the mailbox until MCC rings are created for more efficient
+    communication with the MCC processor.
+
+    If the driver wants to create multiple network interfaces for more
+    than one protection domain, it can call be_create_netobj()
+    multiple times, once for each protection domain.  A maximum of
+    32 protection domains is supported.
+*/
+static int
+be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va,
+	u8 __iomem *db_va, u8 __iomem *pci_va)
+{
+	int status = 0;
+	bool  eventable = false, tx_no_delay = false, rx_no_delay = false;
+	struct be_eq_object *eq_objectp = NULL;
+	struct be_function_object *pfob = &pnob->fn_obj;
+	struct ring_desc rd;
+	u32 set_rxbuf_size;
+	u32 tx_cmpl_wm = CEV_WMARK_96;	/* 0xffffffff to disable */
+	u32 rx_cmpl_wm = CEV_WMARK_160;	/* 0xffffffff to disable */
+	u32 eq_delay = 0; /* delay in 8usec units. 0xffffffff to disable */
+
+	memset(&rd, 0, sizeof(struct ring_desc));
+
+	status = be_function_object_create(csr_va, db_va, pci_va,
+			BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob);
+	if (status != BE_SUCCESS)
+		return status;
+	pnob->fn_obj_created = true;
+
+	if (tx_cmpl_wm == 0xffffffff)
+		tx_no_delay = true;
+	if (rx_cmpl_wm == 0xffffffff)
+		rx_no_delay = true;
+	/*
+	 * now create the necessary rings
+	 * Event Queue first.
+	 */
+	if (pnob->event_q_len) {
+		rd.va = pnob->event_q;
+		rd.pa = pnob->event_q_bus;
+		rd.length = pnob->event_q_size;
+
+		status = be_eq_create(pfob, &rd, 4, pnob->event_q_len,
+				(u32) -1,	/* CEV_WMARK_* or -1 */
+				eq_delay,	/* in 8us units, or -1 */
+				&pnob->event_q_obj);
+		if (status != BE_SUCCESS)
+			goto error_ret;
+		pnob->event_q_id = pnob->event_q_obj.eq_id;
+		pnob->event_q_created = 1;
+		eventable = true;
+		eq_objectp = &pnob->event_q_obj;
+	}
+	/*
+	 * Now Eth Tx Compl. queue.
+	 */
+	if (pnob->txcq_len) {
+		rd.va = pnob->tx_cq;
+		rd.pa = pnob->tx_cq_bus;
+		rd.length = pnob->tx_cq_size;
+
+		status = be_cq_create(pfob, &rd,
+			pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP),
+			false,	/* solicited events */
+			tx_no_delay,	/* nodelay  */
+			tx_cmpl_wm,	/* Watermark encodings */
+			eq_objectp, &pnob->tx_cq_obj);
+		if (status != BE_SUCCESS)
+			goto error_ret;
+
+		pnob->tx_cq_id = pnob->tx_cq_obj.cq_id;
+		pnob->tx_cq_created = 1;
+	}
+	/*
+	 * Eth Tx queue
+	 */
+	if (pnob->tx_q_len) {
+		struct be_eth_sq_parameters ex_params = { 0 };
+		u32 type;
+
+		if (pnob->tx_q_port) {
+			/* TXQ to be bound to a specific port */
+			type = BE_ETH_TX_RING_TYPE_BOUND;
+			ex_params.port = pnob->tx_q_port - 1;
+		} else
+			type = BE_ETH_TX_RING_TYPE_STANDARD;
+
+		rd.va = pnob->tx_q;
+		rd.pa = pnob->tx_q_bus;
+		rd.length = pnob->tx_q_size;
+
+		status = be_eth_sq_create_ex(pfob, &rd,
+				pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP),
+				type, 2, &pnob->tx_cq_obj,
+				&ex_params, &pnob->tx_q_obj);
+
+		if (status != BE_SUCCESS)
+			goto error_ret;
+
+		pnob->tx_q_id = pnob->tx_q_obj.bid;
+		pnob->tx_q_created = 1;
+	}
+	/*
+	 * Now Eth Rx compl. queue.  Always needed.
+	 */
+	rd.va = pnob->rx_cq;
+	rd.pa = pnob->rx_cq_bus;
+	rd.length = pnob->rx_cq_size;
+
+	status = be_cq_create(pfob, &rd,
+			pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP),
+			false,	/* solicited events */
+			rx_no_delay,	/* nodelay  */
+			rx_cmpl_wm,	/* Watermark encodings */
+			eq_objectp, &pnob->rx_cq_obj);
+	if (status != BE_SUCCESS)
+		goto error_ret;
+
+	pnob->rx_cq_id = pnob->rx_cq_obj.cq_id;
+	pnob->rx_cq_created = 1;
+
+	status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size,
+			(u32 *) &set_rxbuf_size);
+	if (status != BE_SUCCESS) {
+		be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size);
+		if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096)
+		    && (pnob->rx_buf_size != 8192))
+			goto error_ret;
+	} else {
+		if (pnob->rx_buf_size != set_rxbuf_size)
+			pnob->rx_buf_size = set_rxbuf_size;
+	}
+	/*
+	 * Eth RX queue.  be_eth_rq_create() always assumes a ring size of 2 pages.
+	 */
+	rd.va = pnob->rx_q;
+	rd.pa = pnob->rx_q_bus;
+	rd.length = pnob->rx_q_size;
+
+	status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj,
+			     &pnob->rx_cq_obj, &pnob->rx_q_obj);
+
+	if (status != BE_SUCCESS)
+		goto error_ret;
+
+	pnob->rx_q_id = pnob->rx_q_obj.rid;
+	pnob->rx_q_created = 1;
+
+	return BE_SUCCESS;	/* All required queues created. */
+
+error_ret:
+	be_destroy_netobj(pnob);
+	return status;
+}
+
+static int be_nob_ring_init(struct be_adapter *adapter,
+				struct be_net_object *pnob)
+{
+	int status;
+
+	pnob->event_q_tl = 0;
+
+	pnob->tx_q_hd = 0;
+	pnob->tx_q_tl = 0;
+
+	pnob->tx_cq_tl = 0;
+
+	pnob->rx_cq_tl = 0;
+
+	memset(pnob->event_q, 0, pnob->event_q_size);
+	memset(pnob->tx_cq, 0, pnob->tx_cq_size);
+	memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **));
+	memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *));
+	pnob->rx_pg_info_hd = 0;
+	pnob->rx_q_hd = 0;
+	atomic_set(&pnob->rx_q_posted, 0);
+
+	status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va,
+				adapter->pci_va);
+	if (status != BE_SUCCESS)
+		return -1;
+
+	be_post_eth_rx_buffs(pnob);
+	return 0;
+}
+
+/* This function handles async callback for link status */
+static void
+be_link_status_async_callback(void *context, u32 event_code, void *event)
+{
+	struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event;
+	struct be_adapter *adapter = context;
+	bool link_enable = false;
+	struct be_net_object *pnob;
+	struct ASYNC_EVENT_TRAILER_AMAP *async_trailer;
+	struct net_device *netdev;
+	u32 async_event_code, async_event_type, active_port;
+	u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex;
+	u32 port0_speed, port1_speed;
+
+	if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
+		/* Not our event to handle */
+		return;
+	}
+	async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *)
+	    ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) -
+	     sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
+
+	async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code,
+					     async_trailer);
+	BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE);
+
+	pnob = adapter->net_obj;
+	netdev = pnob->netdev;
+
+	/* Determine if this event is a switch VLD or a physical link event */
+	async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type,
+					     async_trailer);
+	active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					active_port, link_status);
+	port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					      port0_link_status, link_status);
+	port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					      port1_link_status, link_status);
+	port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					 port0_duplex, link_status);
+	port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					 port1_duplex, link_status);
+	port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					port0_speed, link_status);
+	port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+					port1_speed, link_status);
+	if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) {
+		adapter->be_stat.bes_link_change_virtual++;
+		if (adapter->be_link_sts->active_port != active_port) {
+			dev_notice(&netdev->dev,
+			       "Active port changed due to VLD on switch\n");
+		} else {
+			dev_notice(&netdev->dev, "Link status update\n");
+		}
+
+	} else {
+		adapter->be_stat.bes_link_change_physical++;
+		if (adapter->be_link_sts->active_port != active_port) {
+			dev_notice(&netdev->dev,
+			       "Active port changed due to port link"
+			       " status change\n");
+		} else {
+			dev_notice(&netdev->dev, "Link status update\n");
+		}
+	}
+
+	memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts));
+
+	if ((port0_link_status == ASYNC_EVENT_LINK_UP) ||
+	    (port1_link_status == ASYNC_EVENT_LINK_UP)) {
+		if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
+		    (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
+			/* Both ports were down earlier, so the link is coming up */
+			link_enable = true;
+		}
+
+		if (port0_link_status == ASYNC_EVENT_LINK_UP) {
+			adapter->port0_link_sts = BE_PORT_LINK_UP;
+			adapter->be_link_sts->mac0_duplex = port0_duplex;
+			adapter->be_link_sts->mac0_speed = port0_speed;
+			if (active_port == NTWK_PORT_A)
+				adapter->be_link_sts->active_port = 0;
+		} else
+			adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+		if (port1_link_status == ASYNC_EVENT_LINK_UP) {
+			adapter->port1_link_sts = BE_PORT_LINK_UP;
+			adapter->be_link_sts->mac1_duplex = port1_duplex;
+			adapter->be_link_sts->mac1_speed = port1_speed;
+			if (active_port == NTWK_PORT_B)
+				adapter->be_link_sts->active_port = 1;
+		} else
+			adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+		printk(KERN_INFO "Link Properties for %s:\n", netdev->name);
+		dev_info(&netdev->dev, "Link Properties:\n");
+		be_print_link_info(adapter->be_link_sts);
+
+		if (!link_enable)
+			return;
+		/*
+		 * Both ports were down previously, but at least one of
+		 * them has come up.  If this netdevice's carrier is not
+		 * up yet, indicate link up to the stack.
+		 */
+		if (!netif_carrier_ok(netdev)) {
+			netif_start_queue(netdev);
+			netif_carrier_on(netdev);
+		}
+		return;
+	}
+
+	/* Now both the ports are down. Tell the stack about it */
+	dev_info(&netdev->dev, "Both ports are down\n");
+	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+	adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+	if (netif_carrier_ok(netdev)) {
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+	}
+	return;
+}
+
+static int be_mcc_create(struct be_adapter *adapter)
+{
+	struct be_net_object *pnob;
+
+	pnob = adapter->net_obj;
+	/*
+	 * Create the MCC ring so that all further communication with
+	 * MCC can go through the ring.  We do this at the end since
+	 * we do not want to be dealing with interrupts until the
+	 * initialization is complete.
+	 */
+	pnob->mcc_q_len = MCC_Q_LEN;
+	pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP);
+	pnob->mcc_q =  pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size,
+				&pnob->mcc_q_bus);
+	if (!pnob->mcc_q_bus)
+		return -1;
+	/*
+	 * space for MCC WRB context
+	 */
+	pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
+	pnob->mcc_wrb_ctxt_size =  pnob->mcc_wrb_ctxtLen *
+		sizeof(struct be_mcc_wrb_context);
+	pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL,
+		get_order(pnob->mcc_wrb_ctxt_size));
+	if (pnob->mcc_wrb_ctxt == NULL)
+		return -1;
+	/*
+	 * Space for MCC compl. ring
+	 */
+	pnob->mcc_cq_len = MCC_CQ_LEN;
+	pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP);
+	pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size,
+				&pnob->mcc_cq_bus);
+	if (!pnob->mcc_cq_bus)
+		return -1;
+	return 0;
+}
+
+/*
+    This function creates the MCC request and completion rings required
+    for communicating with the ARM processor.  The caller must have
+    allocated the required amount of memory for the MCC ring and MCC
+    completion ring and posted the virtual address and number of
+    entries in the corresponding members (mcc_q and mcc_cq) in the
+    NetObject structure.
+
+    When this call completes, all further communication with
+    ARM switches from the mailbox to these rings.
+
+    pnob	- Pointer to the NetObject structure.  This NetObject should
+		  have been created using a previous call to be_create_netobj()
+int be_create_mcc_rings(struct be_net_object *pnob)
+{
+	int status = 0;
+	struct ring_desc rd;
+	struct be_function_object *pfob = &pnob->fn_obj;
+
+	memset(&rd, 0, sizeof(struct ring_desc));
+	if (pnob->mcc_cq_len) {
+		rd.va = pnob->mcc_cq;
+		rd.pa = pnob->mcc_cq_bus;
+		rd.length = pnob->mcc_cq_size;
+
+		status = be_cq_create(pfob, &rd,
+			pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP),
+			false,	/* solicited events */
+			true,	/* nodelay  */
+			0,	/* 0 Watermark since Nodelay is true */
+			&pnob->event_q_obj,
+			&pnob->mcc_cq_obj);
+
+		if (status != BE_SUCCESS)
+			return status;
+
+		pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id;
+		pnob->mcc_cq_created = 1;
+	}
+	if (pnob->mcc_q_len) {
+		rd.va = pnob->mcc_q;
+		rd.pa = pnob->mcc_q_bus;
+		rd.length = pnob->mcc_q_size;
+
+		status = be_mcc_ring_create(pfob, &rd,
+				pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP),
+				pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen,
+				&pnob->mcc_cq_obj, &pnob->mcc_q_obj);
+
+		if (status != BE_SUCCESS)
+			return status;
+
+		pnob->mcc_q_created = 1;
+	}
+	return BE_SUCCESS;
+}
+
+static int be_mcc_init(struct be_adapter *adapter)
+{
+	u32 r;
+	struct be_net_object *pnob;
+
+	pnob = adapter->net_obj;
+	memset(pnob->mcc_q, 0, pnob->mcc_q_size);
+	pnob->mcc_q_hd = 0;
+
+	memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size);
+
+	memset(pnob->mcc_cq, 0, pnob->mcc_cq_size);
+	pnob->mcc_cq_tl = 0;
+
+	r = be_create_mcc_rings(adapter->net_obj);
+	if (r != BE_SUCCESS)
+		return -1;
+
+	return 0;
+}
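+
+/*
+ * Illustrative sketch (not in the original submission) of the bring-up
+ * order the comments above describe, as be_probe() below drives it:
+ * allocate DMAable queue memory, create the NetObject (mailbox-based
+ * communication), then create the MCC rings that take over from the
+ * mailbox.
+ */
+static int __maybe_unused be_bringup_order_sketch(struct be_adapter *adapter,
+					struct be_net_object *pnob)
+{
+	if (be_nob_ring_alloc(adapter, pnob) != 0)
+		return -ENOMEM;		/* rings, contexts, stats buffer */
+	if (be_nob_ring_init(adapter, pnob) != 0)
+		return -EIO;		/* calls be_create_netobj() */
+	if (be_mcc_create(adapter) != 0 || be_mcc_init(adapter) != 0)
+		return -EIO;		/* MCC rings replace the mailbox */
+	return 0;
+}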
+
+static void be_remove(struct pci_dev *pdev)
+{
+	struct be_net_object *pnob;
+	struct be_adapter *adapter;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	pci_set_drvdata(pdev, NULL);
+	pnob = (struct be_net_object *)adapter->net_obj;
+
+	flush_scheduled_work();
+
+	if (pnob) {
+		/* Unregister async callback function for link status updates */
+		if (pnob->mcc_q_created)
+			be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
+								NULL, NULL);
+		netobject_cleanup(adapter, pnob);
+	}
+
+	if (adapter->csr_va)
+		iounmap(adapter->csr_va);
+	if (adapter->db_va)
+		iounmap(adapter->db_va);
+	if (adapter->pci_va)
+		iounmap(adapter->pci_va);
+
+	pci_release_regions(adapter->pdev);
+	pci_disable_device(adapter->pdev);
+
+	kfree(adapter->be_link_sts);
+	kfree(adapter->eth_statsp);
+
+	if (adapter->timer_ctxt.get_stats_timer.function)
+		del_timer_sync(&adapter->timer_ctxt.get_stats_timer);
+	kfree(adapter);
+}
+
+/*
+ * This function is called by the PCI sub-system when it finds a PCI
+ * device with dev/vendor IDs that match with one of our devices.
+ * All of the driver initialization is done in this function.
+ */
+static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
+{
+	int status = 0;
+	struct be_adapter *adapter;
+	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv;
+	struct be_net_object *pnob;
+	struct net_device *netdev;
+
+	status = pci_enable_device(pdev);
+	if (status)
+		goto error;
+
+	status = pci_request_regions(pdev, be_driver_name);
+	if (status)
+		goto error_pci_req;
+
+	pci_set_master(pdev);
+	adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL);
+	if (adapter == NULL) {
+		status = -ENOMEM;
+		goto error_adapter;
+	}
+	adapter->dev_state = BE_DEV_STATE_NONE;
+	adapter->pdev = pdev;
+	pci_set_drvdata(pdev, adapter);
+
+	adapter->enable_aic = 1;
+	adapter->max_eqd = MAX_EQD;
+	adapter->min_eqd = 0;
+	adapter->cur_eqd = 0;
+
+	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	if (!status) {
+		adapter->dma_64bit_cap = true;
+	} else {
+		adapter->dma_64bit_cap = false;
+		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (status != 0) {
+			printk(KERN_ERR "Could not set PCI DMA Mask\n");
+			goto cleanup;
+		}
+	}
+
+	status = init_pci_be_function(adapter, pdev);
+	if (status != 0) {
+		printk(KERN_ERR "Failed to map PCI BARS\n");
+		status = -ENOMEM;
+		goto cleanup;
+	}
+
+	be_trace_set_level(DL_ALWAYS | DL_ERR);
+
+	adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS),
+					GFP_KERNEL);
+	if (adapter->be_link_sts == NULL) {
+		printk(KERN_ERR "Memory allocation for link status "
+		       "buffer failed\n");
+		goto cleanup;
+	}
+	spin_lock_init(&adapter->txq_lock);
+
+	netdev = alloc_etherdev(sizeof(struct be_net_object));
+	if (netdev == NULL) {
+		status = -ENOMEM;
+		goto cleanup;
+	}
+	pnob = netdev_priv(netdev);
+	adapter->net_obj = pnob;
+	adapter->netdevp = netdev;
+	pnob->adapter = adapter;
+	pnob->netdev = netdev;
+
+	status = be_nob_ring_alloc(adapter, pnob);
+	if (status != 0)
+		goto cleanup;
+
+	status = be_nob_ring_init(adapter, pnob);
+	if (status != 0)
+		goto cleanup;
+
+	be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false,
+		false, false, netdev->dev_addr, NULL, NULL);
+
+	netdev->init = &benet_init;
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));
+
+	netif_napi_add(netdev, &pnob->napi, be_poll, 64);
+
+	/* if the rx_frag size is 2K, one page is shared as two RX frags */
+	pnob->rx_pg_shared = (pnob->rx_buf_size <= PAGE_SIZE / 2);
+	if (pnob->rx_buf_size != rxbuf_size) {
+		printk(KERN_WARNING
+		       "Could not set Rx buffer size to %d. Using %d\n",
+				       rxbuf_size, pnob->rx_buf_size);
+		rxbuf_size = pnob->rx_buf_size;
+	}
+
+	tasklet_init(&(adapter->sts_handler), be_process_intr,
+		     (unsigned long)adapter);
+	adapter->tasklet_started = 1;
+	spin_lock_init(&(adapter->int_lock));
+
+	status = be_register_isr(adapter, pnob);
+	if (status != 0)
+		goto cleanup;
+
+	adapter->rx_csum = 1;
+	adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+	memset(&get_fwv, 0,
+	       sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
+	printk(KERN_INFO "BladeEngine Driver version:%s. "
+	       "Copyright ServerEngines, Corporation 2005 - 2008\n",
+			       be_drvr_ver);
+	status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL,
+					    NULL);
+	if (status == BE_SUCCESS) {
+		strncpy(be_fw_ver, get_fwv.firmware_version_string, 32);
+		printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
+		       get_fwv.firmware_version_string);
+	} else {
+		printk(KERN_WARNING "Unable to get BE Firmware Version\n");
+	}
+
+	sema_init(&adapter->get_eth_stat_sem, 0);
+	init_timer(&adapter->timer_ctxt.get_stats_timer);
+	atomic_set(&adapter->timer_ctxt.get_stat_flag, 0);
+	adapter->timer_ctxt.get_stats_timer.function =
+	    &be_get_stats_timer_handler;
+
+	status = be_mcc_create(adapter);
+	if (status < 0)
+		goto cleanup;
+	status = be_mcc_init(adapter);
+	if (status < 0)
+		goto cleanup;
+
+
+	status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
+			 be_link_status_async_callback, (void *)adapter);
+	if (status != BE_SUCCESS) {
+		printk(KERN_WARNING "add_async_event_callback failed");
+		printk(KERN_WARNING
+		       "Link status changes may not be reflected\n");
+	}
+
+	status = register_netdev(netdev);
+	if (status != 0)
+		goto cleanup;
+	be_update_link_status(adapter);
+	adapter->dev_state = BE_DEV_STATE_INIT;
+	return 0;
+
+cleanup:
+	be_remove(pdev);
+	return status;
+error_adapter:
+	pci_release_regions(pdev);
+error_pci_req:
+	pci_disable_device(pdev);
+error:
+	printk(KERN_ERR "BladeEngine initalization failed\n");
+	return status;
+}
+
+/*
+ * Get the current link status and print the status on console
+ */
+void be_update_link_status(struct be_adapter *adapter)
+{
+	int status;
+	struct be_net_object *pnob = adapter->net_obj;
+
+	status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL,
+			NULL, NULL);
+	if (status == BE_SUCCESS) {
+		if (adapter->be_link_sts->mac0_speed &&
+		    adapter->be_link_sts->mac0_duplex)
+			adapter->port0_link_sts = BE_PORT_LINK_UP;
+		else
+			adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+		if (adapter->be_link_sts->mac1_speed &&
+		    adapter->be_link_sts->mac1_duplex)
+			adapter->port1_link_sts = BE_PORT_LINK_UP;
+		else
+			adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+		dev_info(&pnob->netdev->dev, "Link Properties:\n");
+		be_print_link_info(adapter->be_link_sts);
+		return;
+	}
+	dev_info(&pnob->netdev->dev, "Could not get link status\n");
+	return;
+}
+
+
+#ifdef CONFIG_PM
+static void
+be_pm_cleanup(struct be_adapter *adapter,
+	      struct be_net_object *pnob, struct net_device *netdev)
+{
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	be_wait_nic_tx_cmplx_cmpl(pnob);
+	be_disable_eq_intr(pnob);
+
+	if (adapter->tasklet_started) {
+		tasklet_kill(&adapter->sts_handler);
+		adapter->tasklet_started = 0;
+	}
+
+	be_unregister_isr(adapter);
+	be_disable_intr(pnob);
+
+	be_tx_q_clean(pnob);
+	be_rx_q_clean(pnob);
+
+	be_destroy_netobj(pnob);
+}
+
+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev =  adapter->netdevp;
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	adapter->dev_pm_state = adapter->dev_state;
+	adapter->dev_state = BE_DEV_STATE_SUSPEND;
+
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		be_pm_cleanup(adapter, pnob, netdev);
+
+	pci_enable_wake(pdev, 3, 1);
+	pci_enable_wake(pdev, 4, 1);	/* D3 Cold = 4 */
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static void be_up(struct be_adapter *adapter)
+{
+	struct be_net_object *pnob = adapter->net_obj;
+
+	if (pnob->num_vlans != 0)
+		be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+			pnob->vlan_tag, NULL, NULL, NULL);
+
+}
+
+static int be_resume(struct pci_dev *pdev)
+{
+	int status = 0;
+	struct be_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev =  adapter->netdevp;
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	pci_set_power_state(pdev, 0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, 3, 0);
+	pci_enable_wake(pdev, 4, 0);	/* 4 is D3 cold */
+
+	netif_carrier_on(netdev);
+	netif_start_queue(netdev);
+
+	if (netif_running(netdev)) {
+		be_rxf_mac_address_read_write(&pnob->fn_obj, false, false,
+			false, true, false, netdev->dev_addr, NULL, NULL);
+
+		status = be_nob_ring_init(adapter, pnob);
+		if (status < 0)
+			return status;
+
+		tasklet_init(&(adapter->sts_handler), be_process_intr,
+			     (unsigned long)adapter);
+		adapter->tasklet_started = 1;
+
+		if (be_register_isr(adapter, pnob) != 0) {
+			printk(KERN_ERR "be_register_isr failed\n");
+			return status;
+		}
+
+
+		status = be_mcc_init(adapter);
+		if (status < 0) {
+			printk(KERN_ERR "be_mcc_init failed\n");
+			return status;
+		}
+		be_update_link_status(adapter);
+		/*
+		 * Register the async callback function to handle link
+		 * status updates
+		 */
+		status = be_mcc_add_async_event_callback(
+				&adapter->net_obj->mcc_q_obj,
+				be_link_status_async_callback, (void *)adapter);
+		if (status != BE_SUCCESS) {
+			printk(KERN_WARNING "add_async_event_callback failed");
+			printk(KERN_WARNING
+			       "Link status changes may not be reflected\n");
+		}
+		be_enable_intr(pnob);
+		be_enable_eq_intr(pnob);
+		be_up(adapter);
+	}
+	netif_device_attach(netdev);
+	adapter->dev_state = adapter->dev_pm_state;
+	return 0;
+
+}
+
+#endif
+
+/* Wait until there are no more pending transmits */
+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob)
+{
+	int i;
+
+	/* Wait for 20us * 50000 (= 1s) and no more */
+	i = 0;
+	while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) {
+		++i;
+		udelay(20);
+	}
+
+	/* Warn if we timed out waiting for TX completions */
+	if (i >= 50000) {
+		printk(KERN_WARNING
+		       "Did not receive completions for all TX requests\n");
+	}
+}
+
+static struct pci_driver be_driver = {
+	.name = be_driver_name,
+	.id_table = be_device_id_table,
+	.probe = be_probe,
+#ifdef CONFIG_PM
+	.suspend = be_suspend,
+	.resume = be_resume,
+#endif
+	.remove = be_remove
+};
+
+/*
+ * Module init entry point.  Registers our device and returns.
+ * Our probe will be called if the device is found.
+ */
+static int __init be_init_module(void)
+{
+	int ret;
+
+	if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) {
+		printk(KERN_WARNING
+		       "Unsupported receive buffer size (%d) requested\n",
+		       rxbuf_size);
+		printk(KERN_WARNING
+		       "Must be 2048, 4096 or 8192. Defaulting to 2048\n");
+		rxbuf_size = 2048;
+	}
+
+	ret = pci_register_driver(&be_driver);
+
+	return ret;
+}
+
+module_init(be_init_module);
+
+/*
+ * be_exit_module - Driver Exit Cleanup Routine
+ */
+static void __exit be_exit_module(void)
+{
+	pci_unregister_driver(&be_driver);
+}
+
+module_exit(be_exit_module);

+ 863 - 0
drivers/staging/benet/be_int.c

@@ -0,0 +1,863 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
+
+#include "benet.h"
+
+/* number of bytes of RX frame that are copied to skb->data */
+#define BE_HDR_LEN 64
+
+#define NETIF_RX(skb) netif_receive_skb(skb)
+#define VLAN_ACCEL_RX(skb, pnob, vt) \
+		vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
+
+/*
+    This function notifies BladeEngine of the number of completion
+    entries processed from the specified completion queue by writing
+    the number of popped entries to the door bell.
+
+    pnob	- Pointer to the NetObject structure
+    n		- Number of completion entries processed
+    cq_id	- Queue ID of the completion queue for which notification
+			is being done.
+    re_arm	- 1  - re-arm the completion ring to generate an event
+		- 0  - don't re-arm the completion ring to generate an event
+*/
+void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
+{
+	struct CQ_DB_AMAP cqdb;
+
+	cqdb.dw[0] = 0;
+	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
+	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
+	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
+	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
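+
+/*
+ * Typical use, as in be_poll() later in this file: after popping
+ * 'work_done' RX completions, ring the doorbell and re-arm the CQ so
+ * the next completion raises an event:
+ *
+ *	be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
+ */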
+
+/*
+ * Adds the additional receive frags indicated by BE, starting from the
+ * given frag index (fi), to the specified skb's frag list.
+ */
+static void
+add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
+	      u32 nresid, u32 fi)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	u32 sk_frag_idx, n;
+	struct be_rx_page_info *rx_page_info;
+	u32 frag_sz = pnob->rx_buf_size;
+
+	sk_frag_idx = skb_shinfo(skb)->nr_frags;
+	while (nresid) {
+		index_inc(&fi, pnob->rx_q_len);
+
+		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+		pnob->rx_ctxt[fi] = NULL;
+		if ((rx_page_info->page_offset) ||
+		    (pnob->rx_pg_shared == false)) {
+			pci_unmap_page(adapter->pdev,
+				       pci_unmap_addr(rx_page_info, bus),
+				       frag_sz, PCI_DMA_FROMDEVICE);
+		}
+
+		n = min(nresid, frag_sz);
+		skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
+		skb_shinfo(skb)->frags[sk_frag_idx].page_offset
+		    = rx_page_info->page_offset;
+		skb_shinfo(skb)->frags[sk_frag_idx].size = n;
+
+		sk_frag_idx++;
+		skb->len += n;
+		skb->data_len += n;
+		skb_shinfo(skb)->nr_frags++;
+		nresid -= n;
+
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		atomic_dec(&pnob->rx_q_posted);
+	}
+}
+
+/*
+ * This function processes incoming nic packets over various Rx queues.
+ * This function takes the adapter, the current Rx status descriptor
+ * entry and the Rx completion queue ID as argument.
+ */
+static inline int process_nic_rx_completion(struct be_net_object *pnob,
+					    struct ETH_RX_COMPL_AMAP *rxcp)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	struct sk_buff *skb;
+	int udpcksm, tcpcksm;
+	int n;
+	u32 nresid, fi;
+	u32 frag_sz = pnob->rx_buf_size;
+	u8 *va;
+	struct be_rx_page_info *rx_page_info;
+	u32 numfrags, vtp, vtm, vlan_tag, pktsize;
+
+	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
+	BUG_ON(fi >= pnob->rx_q_len);	/* fi is unsigned; no < 0 check needed */
+
+	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+	BUG_ON(!rx_page_info->page);
+	pnob->rx_ctxt[fi] = NULL;
+
+	/*
+	 * If one page is used per fragment or if this is the second
+	 * half of the page, unmap the page here
+	 */
+	if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
+		pci_unmap_page(adapter->pdev,
+			       pci_unmap_addr(rx_page_info, bus), frag_sz,
+			       PCI_DMA_FROMDEVICE);
+	}
+
+	atomic_dec(&pnob->rx_q_posted);
+	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
+	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
+	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+	/*
+	 * get rid of RX flush completions first.
+	 */
+	if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
+		put_page(rx_page_info->page);
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		return 0;
+	}
+	skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+	if (skb == NULL) {
+		dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
+		put_page(rx_page_info->page);
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		goto free_frags;
+	}
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	skb->dev = pnob->netdev;
+
+	n = min(pktsize, frag_sz);
+
+	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
+	prefetch(va);
+
+	skb->len = n;
+	skb->data_len = n;
+	if (n <= BE_HDR_LEN) {
+		memcpy(skb->data, va, n);
+		put_page(rx_page_info->page);
+		skb->data_len -= n;
+		skb->tail += n;
+	} else {
+
+		/* Setup the SKB with page buffer information */
+		skb_shinfo(skb)->frags[0].page = rx_page_info->page;
+		skb_shinfo(skb)->nr_frags++;
+
+		/* Copy the header into the skb_data */
+		memcpy(skb->data, va, BE_HDR_LEN);
+		skb_shinfo(skb)->frags[0].page_offset =
+		    rx_page_info->page_offset + BE_HDR_LEN;
+		skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
+		skb->data_len -= BE_HDR_LEN;
+		skb->tail += BE_HDR_LEN;
+	}
+	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+	nresid = pktsize - n;
+
+	skb->protocol = eth_type_trans(skb, pnob->netdev);
+
+	if ((tcpcksm || udpcksm) && adapter->rx_csum)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb->ip_summed = CHECKSUM_NONE;
+	/*
+	 * if we have more bytes left, the frame has been
+	 * given to us in multiple fragments.  This happens
+	 * with Jumbo frames. Add the remaining fragments to
+	 * skb->frags[] array.
+	 */
+	if (nresid)
+		add_skb_frags(pnob, skb, nresid, fi);
+
+	/* update the true size of the skb */
+	skb->truesize = skb->len + sizeof(struct sk_buff);
+
+	/*
+	 * If a 802.3 frame or 802.2 LLC frame
+	 * (i.e) contains length field in MAC Hdr
+	 * and frame len is greater than 64 bytes
+	 */
+	if (((skb->protocol == ntohs(ETH_P_802_2)) ||
+	     (skb->protocol == ntohs(ETH_P_802_3)))
+	    && (pktsize > BE_HDR_LEN)) {
+		/*
+		 * If the length given in the MAC header is less than the
+		 * frame size, the frame is erroneous; drop it
+		 */
+		if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
+			/* Increment Non Ether type II frames dropped */
+			adapter->be_stat.bes_802_3_dropped_frames++;
+
+			kfree_skb(skb);
+			return 0;
+		}
+		/*
+		 * Else, if the length given in the MAC header is greater
+		 * than the frame size, we should not be seeing such frames;
+		 * count the packet as malformed and pass it to the stack
+		 */
+		else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
+			/* Increment Non Ether type II frames malformed */
+			adapter->be_stat.bes_802_3_malformed_frames++;
+		}
+	}
+
+	vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
+	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
+	if (vtp && vtm) {
+		/* Vlan tag present in pkt and BE found
+		 * that the tag matched an entry in VLAN table
+		 */
+		if (!pnob->vlan_grp || pnob->num_vlans == 0) {
+			/* But we have no VLANs configured.
+			 * This should never happen.  Drop the packet.
+			 */
+			dev_info(&pnob->netdev->dev,
+			       "BladeEngine: Unexpected vlan tagged packet\n");
+			kfree_skb(skb);
+			return 0;
+		}
+		/* pass the VLAN packet to stack */
+		vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
+		VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
+
+	} else {
+		NETIF_RX(skb);
+	}
+	return 0;
+
+free_frags:
+	/* free all frags associated with the current rxcp */
+	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
+	while (numfrags-- > 1) {
+		index_inc(&fi, pnob->rx_q_len);
+
+		rx_page_info = (struct be_rx_page_info *)
+		    pnob->rx_ctxt[fi];
+		pnob->rx_ctxt[fi] = (void *)NULL;
+		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+			pci_unmap_page(adapter->pdev,
+				       pci_unmap_addr(rx_page_info, bus),
+				       frag_sz, PCI_DMA_FROMDEVICE);
+		}
+
+		put_page(rx_page_info->page);
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		atomic_dec(&pnob->rx_q_posted);
+	}
+	return -ENOMEM;
+}
+
+static void process_nic_rx_completion_lro(struct be_net_object *pnob,
+					  struct ETH_RX_COMPL_AMAP *rxcp)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
+	unsigned int udpcksm, tcpcksm;
+	u32 numfrags, vlanf, vtm, vlan_tag, nresid;
+	u16 vlant;
+	unsigned int fi, idx, n;
+	struct be_rx_page_info *rx_page_info;
+	u32 frag_sz = pnob->rx_buf_size, pktsize;
+	bool rx_coal = (adapter->max_rx_coal > 1);
+	u8 err, *va;
+	__wsum csum = 0;
+
+	if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
+		/*  Drop the pkt and move to the next completion.  */
+		adapter->be_stat.bes_rx_misc_pkts++;
+		return;
+	}
+	err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
+	if (err || !rx_coal) {
+		/* We won't coalesce Rx pkts if the err bit is set;
+		 * take the path of normal completion processing */
+		process_nic_rx_completion(pnob, rxcp);
+		return;
+	}
+
+	fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
+	BUG_ON(fi >= pnob->rx_q_len);	/* fi is unsigned; no < 0 check needed */
+	rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+	BUG_ON(!rx_page_info->page);
+	pnob->rx_ctxt[fi] = (void *)NULL;
+	/*  If one page is used per fragment or if this is the
+	 * second half of the page, unmap the page here
+	 */
+	if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+		pci_unmap_page(adapter->pdev,
+			       pci_unmap_addr(rx_page_info, bus),
+			       frag_sz, PCI_DMA_FROMDEVICE);
+	}
+
+	numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
+	udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
+	tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
+	vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
+	vlant = be16_to_cpu(vlan_tag);
+	vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
+	vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
+	pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+
+	atomic_dec(&pnob->rx_q_posted);
+
+	if (tcpcksm && udpcksm && pktsize == 32) {
+		/* flush completion entries */
+		put_page(rx_page_info->page);
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		return;
+	}
+	/* Only one of udpcksum and tcpcksum can be set */
+	BUG_ON(udpcksm && tcpcksm);
+
+	/* jumbo frames could come in multiple fragments */
+	BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
+	n = min(pktsize, frag_sz);
+	nresid = pktsize - n;	/* will be useful for jumbo pkts */
+	idx = 0;
+
+	va = page_address(rx_page_info->page) + rx_page_info->page_offset;
+	prefetch(va);
+	rx_frags[idx].page = rx_page_info->page;
+	rx_frags[idx].page_offset = (rx_page_info->page_offset);
+	rx_frags[idx].size = n;
+	memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+
+	/* If we got multiple fragments, we have more data. */
+	while (nresid) {
+		idx++;
+		index_inc(&fi, pnob->rx_q_len);
+
+		rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+		pnob->rx_ctxt[fi] = (void *)NULL;
+		if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+			pci_unmap_page(adapter->pdev,
+				       pci_unmap_addr(rx_page_info, bus),
+				       frag_sz, PCI_DMA_FROMDEVICE);
+		}
+
+		n = min(nresid, frag_sz);
+		rx_frags[idx].page = rx_page_info->page;
+		rx_frags[idx].page_offset = (rx_page_info->page_offset);
+		rx_frags[idx].size = n;
+
+		nresid -= n;
+		memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+		atomic_dec(&pnob->rx_q_posted);
+	}
+
+	if (likely(!(vlanf && vtm))) {
+		lro_receive_frags(&pnob->lro_mgr, rx_frags,
+				  pktsize, pktsize,
+				  (void *)(unsigned long)csum, csum);
+	} else {
+		/* Vlan tag present in pkt and BE found
+		 * that the tag matched an entry in VLAN table
+		 */
+		if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
+			/* But we have no VLANs configured.
+			 * This should never happen.  Drop the packet.
+			 */
+			dev_info(&pnob->netdev->dev,
+			       "BladeEngine: Unexpected vlan tagged packet\n");
+			return;
+		}
+		/* pass the VLAN packet to stack */
+		lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
+					       rx_frags, pktsize, pktsize,
+					       pnob->vlan_grp, vlant,
+					       (void *)(unsigned long)csum,
+					       csum);
+	}
+
+	adapter->be_stat.bes_rx_coal++;
+}
+
+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
+{
+	struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
+	u32 valid, ct;
+
+	valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
+	if (valid == 0)
+		return NULL;
+
+	ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
+	if (ct != 0) {
+		/* Invalid chute number; treat as an error */
+		AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
+	}
+
+	be_adv_rxcq_tl(pnob);
+	AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
+	return rxcp;
+}
+
+static void update_rx_rate(struct be_adapter *adapter)
+{
+	/* update the rate once every two seconds */
+	if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
+		u32 r;
+		r = adapter->eth_rx_bytes /
+		    ((jiffies - adapter->eth_rx_jiffies) / (HZ));
+		r = (r / 1000000);	/* MB/Sec */
+
+		/* Mega Bits/Sec */
+		adapter->be_stat.bes_eth_rx_rate = (r * 8);
+		adapter->eth_rx_jiffies = jiffies;
+		adapter->eth_rx_bytes = 0;
+	}
+}
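+
+/*
+ * Worked example for update_rx_rate() above: 500,000,000 bytes received
+ * over 2 seconds gives r = 250,000,000 B/s = 250 MB/s, reported as
+ * 250 * 8 = 2000 Mb/s in bes_eth_rx_rate.
+ */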
+
+static int process_rx_completions(struct be_net_object *pnob, int max_work)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	struct ETH_RX_COMPL_AMAP *rxcp;
+	u32 nc = 0;
+	unsigned int pktsize;
+
+	while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
+		prefetch(rxcp);
+		pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+		process_nic_rx_completion_lro(pnob, rxcp);
+		adapter->eth_rx_bytes += pktsize;
+		update_rx_rate(adapter);
+		nc++;
+		max_work--;
+		adapter->be_stat.bes_rx_compl++;
+	}
+	if (likely(adapter->max_rx_coal > 1)) {
+		adapter->be_stat.bes_rx_flush++;
+		lro_flush_all(&pnob->lro_mgr);
+	}
+
+	/* Refill the queue */
+	if (atomic_read(&pnob->rx_q_posted) < 900)
+		be_post_eth_rx_buffs(pnob);
+
+	return nc;
+}
+
+static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
+{
+	struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
+	u32 valid;
+
+	valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
+	if (valid == 0)
+		return NULL;
+
+	AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
+	be_adv_txcq_tl(pnob);
+	return txcp;
+
+}
+
+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	int cur_index, tx_wrbs_completed = 0;
+	struct sk_buff *skb;
+	u64 busaddr, pa, pa_lo, pa_hi;
+	struct ETH_WRB_AMAP *wrb;
+	u32 frag_len, last_index, j;
+
+	last_index = tx_compl_lastwrb_idx_get(pnob);
+	BUG_ON(last_index != end_idx);
+	pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
+	do {
+		cur_index = pnob->tx_q_tl;
+		wrb = &pnob->tx_q[cur_index];
+		pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
+		pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
+		frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
+		busaddr = (pa_hi << 32) | pa_lo;
+		if (busaddr != 0) {
+			pa = le64_to_cpu(busaddr);
+			pci_unmap_single(adapter->pdev, pa,
+					 frag_len, PCI_DMA_TODEVICE);
+		}
+		if (cur_index == last_index) {
+			skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
+			BUG_ON(!skb);
+			for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
+				struct skb_frag_struct *frag;
+				frag = &skb_shinfo(skb)->frags[j];
+				pci_unmap_page(adapter->pdev,
+					       (ulong) frag->page, frag->size,
+					       PCI_DMA_TODEVICE);
+			}
+			kfree_skb(skb);
+			pnob->tx_ctxt[cur_index] = NULL;
+		} else {
+			BUG_ON(pnob->tx_ctxt[cur_index]);
+		}
+		tx_wrbs_completed++;
+		be_adv_txq_tl(pnob);
+	} while (cur_index != last_index);
+	atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
+}
+
+/* there is no need to take an SMP lock here since currently
+ * we have only one instance of the tasklet that does completion
+ * processing.
+ */
+static void process_nic_tx_completions(struct be_net_object *pnob)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	struct ETH_TX_COMPL_AMAP *txcp;
+	struct net_device *netdev = pnob->netdev;
+	u32 end_idx, num_processed = 0;
+
+	adapter->be_stat.bes_tx_events++;
+
+	while ((txcp = be_get_tx_cmpl(pnob))) {
+		end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
+		process_one_tx_compl(pnob, end_idx);
+		num_processed++;
+		adapter->be_stat.bes_tx_compl++;
+	}
+	be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
+	/*
+	 * We got Tx completions and have usable WRBs.
+	 * If the netdev's queue has been stopped
+	 * because we had run out of WRBs, wake it now.
+	 */
+	spin_lock(&adapter->txq_lock);
+	if (netif_queue_stopped(netdev)
+	    && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
+		netif_wake_queue(netdev);
+	}
+	spin_unlock(&adapter->txq_lock);
+}
+
+static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
+{
+	u32 nposted = 0;
+	struct ETH_RX_D_AMAP *rxd = NULL;
+	struct be_recv_buffer *rxbp;
+	void **rx_ctxp;
+	struct RQ_DB_AMAP rqdb;
+
+	rx_ctxp = pnob->rx_ctxt;
+
+	while (!list_empty(rxbl) &&
+	       (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
+
+		rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
+		list_del(&rxbp->rxb_list);
+		rxd = pnob->rx_q + pnob->rx_q_hd;
+		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
+		AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
+
+		rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
+		be_adv_rxq_hd(pnob);
+		nposted++;
+	}
+
+	if (nposted) {
+		/* Now press the door bell to notify BladeEngine. */
+		rqdb.dw[0] = 0;
+		AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
+		AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
+		PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
+	}
+	atomic_add(nposted, &pnob->rx_q_posted);
+	return nposted;
+}
+
+void be_post_eth_rx_buffs(struct be_net_object *pnob)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	u32 num_bufs, r;
+	u64 busaddr = 0, tmp_pa;
+	u32 max_bufs, pg_hd;
+	u32 frag_size;
+	struct be_recv_buffer *rxbp;
+	struct list_head rxbl;
+	struct be_rx_page_info *rx_page_info;
+	struct page *page = NULL;
+	u32 page_order = 0;
+	gfp_t alloc_flags = GFP_ATOMIC;
+
+	BUG_ON(!adapter);
+
+	max_bufs = 64;		/* must be an even number <= 255 */
+
+	frag_size = pnob->rx_buf_size;
+	page_order = get_order(frag_size);
+
+	if (frag_size == 8192)
+		alloc_flags |= (gfp_t) __GFP_COMP;
+	/*
+	 * Form a linked list of RECV_BUFFER structures to be posted.
+	 * We will post an even number of buffers so that pages can be
+	 * shared.
+	 */
+	INIT_LIST_HEAD(&rxbl);
+
+	for (num_bufs = 0; num_bufs < max_bufs &&
+		!pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {
+
+		rxbp = &pnob->eth_rx_bufs[num_bufs];
+		pg_hd = pnob->rx_pg_info_hd;
+		rx_page_info = &pnob->rx_page_info[pg_hd];
+
+		if (!page) {
+			page = alloc_pages(alloc_flags, page_order);
+			if (unlikely(page == NULL)) {
+				adapter->be_stat.bes_ethrx_post_fail++;
+				pnob->rxbuf_post_fail++;
+				break;
+			}
+			pnob->rxbuf_post_fail = 0;
+			busaddr = pci_map_page(adapter->pdev, page, 0,
+					       frag_size, PCI_DMA_FROMDEVICE);
+			rx_page_info->page_offset = 0;
+			rx_page_info->page = page;
+			/*
+			 * If the page is not being shared between two
+			 * frags, allocate a new one on the next iteration
+			 */
+			if (pnob->rx_pg_shared == false)
+				page = NULL;
+		} else {
+			get_page(page);
+			rx_page_info->page_offset += frag_size;
+			rx_page_info->page = page;
+			/*
+			 * We are finished with the allocated page;
+			 * allocate a new one on the next iteration
+			 */
+			page = NULL;
+		}
+		rxbp->rxb_ctxt = (void *)rx_page_info;
+		index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);
+
+		pci_unmap_addr_set(rx_page_info, bus, busaddr);
+		tmp_pa = busaddr + rx_page_info->page_offset;
+		rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
+		rxbp->rxb_pa_hi = (tmp_pa >> 32);
+		rxbp->rxb_len = frag_size;
+		list_add_tail(&rxbp->rxb_list, &rxbl);
+	}			/* End of for */
+
+	r = post_rx_buffs(pnob, &rxbl);
+	BUG_ON(r != num_bufs);
+	return;
+}
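+
+/*
+ * Page-sharing example for be_post_eth_rx_buffs() above (assuming 4K
+ * pages and a 2K rx_buf_size, so rx_pg_shared is true): the first
+ * iteration maps a fresh page at page_offset 0; the next iteration
+ * calls get_page() and posts the same page at page_offset 2048.  The
+ * completion path later drops each half's reference with put_page().
+ */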
+
+/*
+ * Interrupt service for network function.  We just schedule the
+ * tasklet which does all completion processing.
+ */
+irqreturn_t be_int(int irq, void *dev)
+{
+	struct net_device *netdev = dev;
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	u32 isr;
+
+	isr = CSR_READ(&pnob->fn_obj, cev.isr1);
+	if (unlikely(!isr))
+		return IRQ_NONE;
+
+	spin_lock(&adapter->int_lock);
+	adapter->isr |= isr;
+	spin_unlock(&adapter->int_lock);
+
+	adapter->be_stat.bes_ints++;
+
+	tasklet_schedule(&adapter->sts_handler);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Poll function called by NAPI with a work budget.
+ * We process as many UC, BC and MC receive completions
+ * as the budget allows and return the actual number of
+ * RX statuses processed.
+ */
+int be_poll(struct napi_struct *napi, int budget)
+{
+	struct be_net_object *pnob =
+			container_of(napi, struct be_net_object, napi);
+	u32 work_done;
+
+	pnob->adapter->be_stat.bes_polls++;
+	work_done = process_rx_completions(pnob, budget);
+	BUG_ON(work_done > budget);
+
+	/* All consumed */
+	if (work_done < budget) {
+		netif_rx_complete(napi);
+		/* enable intr */
+		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
+	} else {
+		/* More to be consumed; continue with interrupts disabled */
+		be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
+	}
+	return work_done;
+}
+
+static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
+{
+	struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
+	if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
+		return NULL;
+	be_adv_eq_tl(pnob);
+	return eqp;
+}
+
+/*
+ * Processes all valid events in the event ring associated with the given
+ * NetObject and returns the number processed; the caller notifies BE.
+ */
+static inline u32 process_events(struct be_net_object *pnob)
+{
+	struct be_adapter *adapter = pnob->adapter;
+	struct EQ_ENTRY_AMAP *eqp;
+	u32 rid, num_events = 0;
+	struct net_device *netdev = pnob->netdev;
+
+	while ((eqp = get_event(pnob)) != NULL) {
+		adapter->be_stat.bes_events++;
+		rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
+		if (rid == pnob->rx_cq_id) {
+			adapter->be_stat.bes_rx_events++;
+			netif_rx_schedule(&pnob->napi);
+		} else if (rid == pnob->tx_cq_id) {
+			process_nic_tx_completions(pnob);
+		} else if (rid == pnob->mcc_cq_id) {
+			be_mcc_process_cq(&pnob->mcc_q_obj, 1);
+		} else {
+			dev_info(&netdev->dev,
+					"Invalid EQ ResourceID %d\n", rid);
+		}
+		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
+		AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
+		num_events++;
+	}
+	return num_events;
+}
+
+static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
+{
+	int status;
+	struct be_eq_object *eq_objectp;
+
+	/* update once a second */
+	if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
+		/* One second elapsed since last update  */
+		u32 r, new_eqd = -1;
+		r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
+		r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
+		adapter->be_stat.bes_ips = r;
+		adapter->ips_jiffies = jiffies;
+		adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
+		if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
+			new_eqd = (adapter->cur_eqd + 8);
+		if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
+			new_eqd = (adapter->cur_eqd - 8);
+		if (adapter->enable_aic && new_eqd != -1) {
+			eq_objectp = &pnob->event_q_obj;
+			status = be_eq_modify_delay(&pnob->fn_obj, 1,
+						    &eq_objectp, &new_eqd, NULL,
+						    NULL, NULL);
+			if (status == BE_SUCCESS)
+				adapter->cur_eqd = new_eqd;
+		}
+	}
+}
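+
+/*
+ * Example of the adaptive scheme above: with cur_eqd = 16, an interrupt
+ * rate above IPS_HI_WM requests new_eqd = 24 (more coalescing), while a
+ * rate below IPS_LO_WM requests new_eqd = 8.  The change is applied only
+ * when enable_aic is set and cur_eqd has not already reached max_eqd
+ * (or min_eqd, respectively).
+ */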
+
+/*
+    This function notifies BladeEngine of how many events were processed
+    from the event queue by ringing the corresponding door bell and
+    optionally re-arms the event queue.
+    n		- number of events processed
+    re_arm	- 1 - re-arm the EQ, 0 - do not re-arm the EQ
+
+*/
+static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
+{
+	struct CQ_DB_AMAP eqdb;
+	eqdb.dw[0] = 0;
+
+	AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
+	AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
+	AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
+	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
+	/*
+	 * In some situations we see an interrupt with no valid
+	 * EQ entry.  To keep going, we need to ring the DB even if
+	 * num_popped is 0.
+	 */
+	PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
+	return;
+}
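+
+/*
+ * Typical use, as in be_process_intr() below: after draining 'n'
+ * events, ring the doorbell and re-arm the EQ:
+ *
+ *	be_notify_event(pnob, n, 1);
+ */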
+
+/*
+ * Called from the tasklet scheduled by ISR.  All real interrupt processing
+ * is done here.
+ */
+void be_process_intr(unsigned long context)
+{
+	struct be_adapter *adapter = (struct be_adapter *)context;
+	struct be_net_object *pnob = adapter->net_obj;
+	u32 isr, n;
+	ulong flags = 0;
+
+	isr = adapter->isr;
+
+	/*
+	 * We create only one NIC event queue in Linux, so events are
+	 * expected only on the first event queue
+	 */
+	BUG_ON(isr & 0xfffffffe);
+	if ((isr & 1) == 0)
+		return;		/* not our interrupt */
+	n = process_events(pnob);
+	/*
+	 * Clear the event bit. adapter->isr is  set by
+	 * hard interrupt.  Prevent race with lock.
+	 */
+	spin_lock_irqsave(&adapter->int_lock, flags);
+	adapter->isr &= ~1;
+	spin_unlock_irqrestore(&adapter->int_lock, flags);
+	be_notify_event(pnob, n, 1);
+	/*
+	 * If previous allocation attempts had failed and
+	 * BE has used up all posted buffers, post RX buffers here
+	 */
+	if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
+		be_post_eth_rx_buffs(pnob);
+	update_eqd(adapter, pnob);
+	return;
+}

+ 705 - 0
drivers/staging/benet/be_netif.c

@@ -0,0 +1,705 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * be_netif.c
+ *
+ * This file contains the various driver entry points seen by the TCP/IP stack.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include "benet.h"
+#include <linux/ip.h>
+#include <linux/inet_lro.h>
+
+/* Strings to print Link properties */
+static const char *link_speed[] = {
+	"Invalid link Speed Value",
+	"10 Mbps",
+	"100 Mbps",
+	"1 Gbps",
+	"10 Gbps"
+};
+
+static const char *link_duplex[] = {
+	"Invalid Duplex Value",
+	"Half Duplex",
+	"Full Duplex"
+};
+
+static const char *link_state[] = {
+	"",
+	"(active)"
+};
+
+void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
+{
+	u16 si, di, ai;
+
+	/* Port 0 */
+	if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
+		/* Port is up and running */
+		si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
+		di = (lnk_status->mac0_duplex < 3) ?
+		    lnk_status->mac0_duplex : 0;
+		ai = (lnk_status->active_port == 0) ? 1 : 0;
+		printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
+		       link_speed[si], link_duplex[di], link_state[ai]);
+	} else
+		printk(KERN_INFO "PortNo. 0: Down\n");
+
+	/* Port 1 */
+	if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
+		/* Port is up and running */
+		si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
+		di = (lnk_status->mac1_duplex < 3) ?
+		    lnk_status->mac1_duplex : 0;
+		ai = (lnk_status->active_port == 1) ? 1 : 0;
+		printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
+		       link_speed[si], link_duplex[di], link_state[ai]);
+	} else
+		printk(KERN_INFO "PortNo. 1: Down\n");
+
+	return;
+}
+
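+/*
+ * LRO frag-header parser: fills in the header pointers and returns 0 for
+ * TCP/IPv4 frames (optionally VLAN tagged); returns -1 for anything else
+ * so that inet_lro passes it up unaggregated.
+ */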
+static int
+be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
+		   void **ip_hdr, void **tcpudp_hdr,
+		   u64 *hdr_flags, void *priv)
+{
+	struct ethhdr *eh;
+	struct vlan_ethhdr *veh;
+	struct iphdr *iph;
+	u8 *va = page_address(frag->page) + frag->page_offset;
+	unsigned long ll_hlen;
+
+	/* find the mac header, abort if not IPv4 */
+
+	prefetch(va);
+	eh = (struct ethhdr *)va;
+	*mac_hdr = eh;
+	ll_hlen = ETH_HLEN;
+	if (eh->h_proto != htons(ETH_P_IP)) {
+		if (eh->h_proto == htons(ETH_P_8021Q)) {
+			veh = (struct vlan_ethhdr *)va;
+			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
+				return -1;
+
+			ll_hlen += VLAN_HLEN;
+
+		} else {
+			return -1;
+		}
+	}
+	*hdr_flags = LRO_IPV4;
+
+	iph = (struct iphdr *)(va + ll_hlen);
+	*ip_hdr = iph;
+	if (iph->protocol != IPPROTO_TCP)
+		return -1;
+	*hdr_flags |= LRO_TCP;
+	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
+
+	return 0;
+}
+
+static int benet_open(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	struct net_lro_mgr *lro_mgr;
+
+	if (adapter->dev_state < BE_DEV_STATE_INIT)
+		return -EAGAIN;
+
+	lro_mgr = &pnob->lro_mgr;
+	lro_mgr->dev = netdev;
+
+	lro_mgr->features = LRO_F_NAPI;
+	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
+	lro_mgr->lro_arr = pnob->lro_desc;
+	lro_mgr->get_frag_header = be_get_frag_header;
+	lro_mgr->max_aggr = adapter->max_rx_coal;
+	lro_mgr->frag_align_pad = 2;
+	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+		lro_mgr->max_aggr = MAX_SKB_FRAGS;
+
+	adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+	be_update_link_status(adapter);
+
+	/*
+	 * Set carrier on only if Physical Link up
+	 * Either of the port link status up signifies this
+	 */
+	if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
+	    (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
+		netif_start_queue(netdev);
+		netif_carrier_on(netdev);
+	}
+
+	adapter->dev_state = BE_DEV_STATE_OPEN;
+	napi_enable(&pnob->napi);
+	be_enable_intr(pnob);
+	be_enable_eq_intr(pnob);
+	/*
+	 * The RX completion queue may be in a disarmed state.  Arm it.
+	 */
+	be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
+
+	return 0;
+}
+
+static int benet_close(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+
+	netif_stop_queue(netdev);
+	synchronize_irq(netdev->irq);
+
+	be_wait_nic_tx_cmplx_cmpl(pnob);
+	adapter->dev_state = BE_DEV_STATE_INIT;
+	netif_carrier_off(netdev);
+
+	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+	adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+	be_disable_intr(pnob);
+	be_disable_eq_intr(pnob);
+	napi_disable(&pnob->napi);
+
+	return 0;
+}
+
+/*
+ * Set a MAC address for BE.
+ * Takes netdev and a void pointer as arguments.
+ * The pointer holds the new address to be used.
+ */
+static int benet_set_mac_addr(struct net_device *netdev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
+				netdev->dev_addr, NULL, NULL);
+	/*
+	 * Since we are doing Active-Passive failover, both
+	 * ports should have matching MAC addresses at all times.
+	 */
+	be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
+				      netdev->dev_addr, NULL, NULL);
+
+	return 0;
+}
+
+void be_get_stats_timer_handler(unsigned long context)
+{
+	struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
+
+	if (atomic_read(&ctxt->get_stat_flag)) {
+		atomic_dec(&ctxt->get_stat_flag);
+		up((void *)ctxt->get_stat_sem_addr);
+	}
+	del_timer(&ctxt->get_stats_timer);
+	return;
+}
+
+void be_get_stat_cb(void *context, int status,
+		    struct MCC_WRB_AMAP *optional_wrb)
+{
+	struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
+	/*
+	 * just up the semaphore if the get_stat_flag
+	 * reads 1. so that the waiter can continue.
+	 * If it is 0, then it was handled by the timer handler.
+	 */
+	del_timer(&ctxt->get_stats_timer);
+	if (atomic_read(&ctxt->get_stat_flag)) {
+		atomic_dec(&ctxt->get_stat_flag);
+		up((void *)ctxt->get_stat_sem_addr);
+	}
+}
+
+struct net_device_stats *benet_get_stats(struct net_device *dev)
+{
+	struct be_net_object *pnob = netdev_priv(dev);
+	struct be_adapter *adapter = pnob->adapter;
+	u64 pa;
+	struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;
+
+	if (adapter->dev_state != BE_DEV_STATE_OPEN) {
+		/* Return previously read stats */
+		return &(adapter->benet_stats);
+	}
+	/* Get Physical Addr */
+	pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
+			    sizeof(struct FWCMD_ETH_GET_STATISTICS),
+			    PCI_DMA_FROMDEVICE);
+	ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
+	atomic_inc(&ctxt->get_stat_flag);
+
+	be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
+				    cpu_to_le64(pa), be_get_stat_cb, ctxt,
+				    NULL);
+
+	ctxt->get_stats_timer.data = (unsigned long)ctxt;
+	mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
+	down((void *)ctxt->get_stat_sem_addr);	/* callback will unblock us */
+
+	/* Adding port0 and port1 stats. */
+	adapter->benet_stats.rx_packets =
+	    adapter->eth_statsp->params.response.p0recvdtotalframes +
+	    adapter->eth_statsp->params.response.p1recvdtotalframes;
+	adapter->benet_stats.tx_packets =
+	    adapter->eth_statsp->params.response.p0xmitunicastframes +
+	    adapter->eth_statsp->params.response.p1xmitunicastframes;
+	adapter->benet_stats.tx_bytes =
+	    adapter->eth_statsp->params.response.p0xmitbyteslsd +
+	    adapter->eth_statsp->params.response.p1xmitbyteslsd;
+	adapter->benet_stats.rx_errors =
+	    adapter->eth_statsp->params.response.p0crcerrors +
+	    adapter->eth_statsp->params.response.p1crcerrors;
+	adapter->benet_stats.rx_errors +=
+	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
+	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
+	adapter->benet_stats.rx_errors +=
+	    adapter->eth_statsp->params.response.p0inrangelenerrors +
+	    adapter->eth_statsp->params.response.p1inrangelenerrors;
+	adapter->benet_stats.rx_bytes =
+	    adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
+	    adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
+	adapter->benet_stats.rx_crc_errors =
+	    adapter->eth_statsp->params.response.p0crcerrors +
+	    adapter->eth_statsp->params.response.p1crcerrors;
+
+	adapter->benet_stats.tx_packets +=
+	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
+	    adapter->eth_statsp->params.response.p1xmitmulticastframes;
+	adapter->benet_stats.tx_packets +=
+	    adapter->eth_statsp->params.response.p0xmitbroadcastframes +
+	    adapter->eth_statsp->params.response.p1xmitbroadcastframes;
+	adapter->benet_stats.tx_errors = 0;
+
+	adapter->benet_stats.multicast =
+	    adapter->eth_statsp->params.response.p0xmitmulticastframes +
+	    adapter->eth_statsp->params.response.p1xmitmulticastframes;
+
+	adapter->benet_stats.rx_fifo_errors =
+	    adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
+	    adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
+	adapter->benet_stats.rx_frame_errors =
+	    adapter->eth_statsp->params.response.p0alignmentsymerrs +
+	    adapter->eth_statsp->params.response.p1alignmentsymerrs;
+	adapter->benet_stats.rx_length_errors =
+	    adapter->eth_statsp->params.response.p0inrangelenerrors +
+	    adapter->eth_statsp->params.response.p1inrangelenerrors;
+	adapter->benet_stats.rx_length_errors +=
+	    adapter->eth_statsp->params.response.p0outrangeerrors +
+	    adapter->eth_statsp->params.response.p1outrangeerrors;
+	adapter->benet_stats.rx_length_errors +=
+	    adapter->eth_statsp->params.response.p0frametoolongerrors +
+	    adapter->eth_statsp->params.response.p1frametoolongerrors;
+
+	pci_unmap_single(adapter->pdev, pa,
+			 sizeof(struct FWCMD_ETH_GET_STATISTICS),
+			 PCI_DMA_FROMDEVICE);
+	return &(adapter->benet_stats);
+}
+
+static void be_start_tx(struct be_net_object *pnob, u32 nposted)
+{
+#define CSR_ETH_MAX_SQPOSTS 255
+	struct SQ_DB_AMAP sqdb;
+
+	sqdb.dw[0] = 0;
+
+	AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
+	while (nposted) {
+		if (nposted > CSR_ETH_MAX_SQPOSTS) {
+			AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
+					  CSR_ETH_MAX_SQPOSTS);
+			nposted -= CSR_ETH_MAX_SQPOSTS;
+		} else {
+			AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
+			nposted = 0;
+		}
+		PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
+	}
+
+	return;
+}
+
+static void update_tx_rate(struct be_adapter *adapter)
+{
+	/* update the rate once every two seconds */
+	if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
+		u32 r;
+		r = adapter->eth_tx_bytes /
+		    ((jiffies - adapter->eth_tx_jiffies) / (HZ));
+		r = (r / 1000000);	/* M bytes/s */
+		adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
+		adapter->eth_tx_jiffies = jiffies;
+		adapter->eth_tx_bytes = 0;
+	}
+}
+
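+/*
+ * Count the WRBs an skb will consume: one for the linear area (if any)
+ * plus one per page fragment, summed over the whole frag_list chain.
+ * For example, an skb with a linear area and three page frags needs four.
+ */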
+static int wrb_cnt_in_skb(struct sk_buff *skb)
+{
+	int cnt = 0;
+	while (skb) {
+		if (skb->len > skb->data_len)
+			cnt++;
+		cnt += skb_shinfo(skb)->nr_frags;
+		skb = skb_shinfo(skb)->frag_list;
+	}
+	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
+	return cnt;
+}
+
+static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
+{
+	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
+	AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
+	AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
+}
+
+static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
+			   struct be_net_object *pnob)
+{
+	wrb->dw[2] = 0;
+	wrb->dw[3] = 0;
+	AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
+	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
+		AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
+		AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
+				  skb_shinfo(skb)->gso_size);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
+		if (proto == IPPROTO_TCP)
+			AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
+		else if (proto == IPPROTO_UDP)
+			AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
+	}
+	if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
+		AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
+		AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
+	}
+}
+
+static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
+				  struct ETH_WRB_AMAP *from)
+{
+
+	to->dw[2] = from->dw[2];
+	to->dw[3] = from->dw[3];
+}
+
+/* Returns the actual count of wrbs used including a possible dummy */
+static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
+			   u32 wrb_cnt, u32 *copied)
+{
+	u64 busaddr;
+	struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
+	u32 i;
+	bool dummy = true;
+	struct pci_dev *pdev = pnob->adapter->pdev;
+
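+	/*
+	 * The queue always advances by an even number of WRBs per packet:
+	 * an odd fragment count gets one zero-length dummy WRB appended
+	 * (the "dummy" posted at the end of this function).
+	 */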
+	if (wrb_cnt & 1)
+		wrb_cnt++;
+	else
+		dummy = false;
+
+	atomic_add(wrb_cnt, &pnob->tx_q_used);
+
+	while (skb) {
+		if (skb->len > skb->data_len) {
+			int len = skb->len - skb->data_len;
+			busaddr = pci_map_single(pdev, skb->data, len,
+						 PCI_DMA_TODEVICE);
+			busaddr = cpu_to_le64(busaddr);
+			wrb = &pnob->tx_q[pnob->tx_q_hd];
+			if (first == NULL) {
+				wrb_fill_extra(wrb, skb, pnob);
+				first = wrb;
+			} else {
+				wrb_copy_extra(wrb, first);
+			}
+			wrb_fill(wrb, busaddr, len);
+			be_adv_txq_hd(pnob);
+			*copied += len;
+		}
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			struct skb_frag_struct *frag =
+			    &skb_shinfo(skb)->frags[i];
+			busaddr = pci_map_page(pdev, frag->page,
+					       frag->page_offset, frag->size,
+					       PCI_DMA_TODEVICE);
+			busaddr = cpu_to_le64(busaddr);
+			wrb = &pnob->tx_q[pnob->tx_q_hd];
+			if (first == NULL) {
+				wrb_fill_extra(wrb, skb, pnob);
+				first = wrb;
+			} else {
+				wrb_copy_extra(wrb, first);
+			}
+			wrb_fill(wrb, busaddr, frag->size);
+			be_adv_txq_hd(pnob);
+			*copied += frag->size;
+		}
+		skb = skb_shinfo(skb)->frag_list;
+	}
+
+	if (dummy) {
+		wrb = &pnob->tx_q[pnob->tx_q_hd];
+		BUG_ON(first == NULL);
+		wrb_copy_extra(wrb, first);
+		wrb_fill(wrb, 0, 0);
+		be_adv_txq_hd(pnob);
+	}
+	AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
+	AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
+	return wrb_cnt;
+}
+
+/* For each skb transmitted, tx_ctxt stores the number of WRBs at the
+ * start index and the skb pointer at the end index.  For example, a
+ * 3-WRB skb remembered at start 10 stores 3 at tx_ctxt[10] and the skb
+ * pointer at tx_ctxt[12].
+ */
+static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
+					   struct sk_buff *skb, int wrb_cnt,
+					   u32 start)
+{
+	*(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
+	index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
+	pnob->tx_ctxt[start] = skb;
+}
+
+static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+	u32 wrb_cnt, copied = 0;
+	u32 start = pnob->tx_q_hd;
+
+	adapter->be_stat.bes_tx_reqs++;
+
+	wrb_cnt = wrb_cnt_in_skb(skb);
+	spin_lock_bh(&adapter->txq_lock);
+	if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
+		netif_stop_queue(pnob->netdev);
+		spin_unlock_bh(&adapter->txq_lock);
+		adapter->be_stat.bes_tx_fails++;
+		return NETDEV_TX_BUSY;
+	}
+	spin_unlock_bh(&adapter->txq_lock);
+
+	wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
+	be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);
+
+	be_start_tx(pnob, wrb_cnt);
+
+	adapter->eth_tx_bytes += copied;
+	adapter->be_stat.bes_tx_wrbs += wrb_cnt;
+	update_tx_rate(adapter);
+	netdev->trans_start = jiffies;
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * This is the driver entry point to change the mtu of the device
+ * Returns 0 for success and errno for failure.
+ */
+static int benet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	/*
+	 * BE supports jumbo frame sizes up to 9000 bytes including the link
+	 * layer header.  Considering the different variants of frame formats
+	 * possible, like VLAN and SNAP/LLC, the maximum possible MTU value
+	 * is 8974 bytes.
+	 */
+
+	if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
+		dev_info(&netdev->dev, "Invalid MTU requested. "
+			       "Must be between %d and %d bytes\n",
+				       (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
+		return -EINVAL;
+	}
+	dev_info(&netdev->dev, "MTU changed from %d to %d\n",
+						netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * This is the driver entry point to register a vlan with the device
+ */
+static void benet_vlan_register(struct net_device *netdev,
+				struct vlan_group *grp)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	be_disable_eq_intr(pnob);
+	pnob->vlan_grp = grp;
+	pnob->num_vlans = 0;
+	be_enable_eq_intr(pnob);
+}
+
+/*
+ * This is the driver entry point to add a vlan vlan_id
+ * with the device netdev
+ */
+static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
+		/* no way to return an error */
+		dev_info(&netdev->dev,
+		       "BladeEngine: Cannot configure more than %d VLANs\n",
+			       BE_NUM_VLAN_SUPPORTED);
+		return;
+	}
+	/* The new vlan tag will be in the slot indicated by num_vlans. */
+	pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
+	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+			   pnob->vlan_tag, NULL, NULL, NULL);
+}
+
+/*
+ * This is the driver entry point to remove a vlan vlan_id
+ * with the device netdev
+ */
+static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	u32 i;
+
+	/*
+	 * BladeEngine supports 32 VLAN tag filters across both ports.
+	 * To program a VLAN tag, the RXF_RTPR_CSR register is used.
+	 * Each 32-bit value of RXF_RTDR_CSR can address 2 VLAN tag entries.
+	 * The VLAN table is of depth 16, thus we support 32 tags.
+	 */
+
+	for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
+		if (pnob->vlan_tag[i] == vlan_id)
+			break;
+	}
+
+	if (i == BE_NUM_VLAN_SUPPORTED)
+		return;
+	/* Now compact the vlan tag array by removing hole created. */
+	while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
+		pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
+		i++;
+	}
+	if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
+		pnob->vlan_tag[i] = (u16) 0x0;
+	pnob->num_vlans--;
+	be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+			   pnob->vlan_tag, NULL, NULL, NULL);
+}
+
+/*
+ * This function is called to program multicast
+ * address in the multicast filter of the ASIC.
+ */
+static void be_set_multicast_filter(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct dev_mc_list *mc_ptr;
+	u8 mac_addr[32][ETH_ALEN];
+	int i;
+
+	if (netdev->flags & IFF_ALLMULTI) {
+		/* set BE in Multicast promiscuous */
+		be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
+					NULL, NULL);
+		return;
+	}
+
+	/* copy at most 32 addresses into the filter */
+	for (mc_ptr = netdev->mc_list, i = 0; mc_ptr && i < 32;
+	     mc_ptr = mc_ptr->next, i++) {
+		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
+	}
+
+	/* reset the promiscuous mode also. */
+	be_rxf_multicast_config(&pnob->fn_obj, false, i,
+				&mac_addr[0][0], NULL, NULL, NULL);
+}
+
+/*
+ * This is the driver entry point to set the multicast list
+ * for the device netdev.  It is used to select promiscuous
+ * mode, multicast promiscuous mode, or plain multicast mode.
+ */
+static void benet_set_multicast_list(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+
+	if (netdev->flags & IFF_PROMISC) {
+		be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
+	} else {
+		be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
+		be_set_multicast_filter(netdev);
+	}
+}
+
+int benet_init(struct net_device *netdev)
+{
+	struct be_net_object *pnob = netdev_priv(netdev);
+	struct be_adapter *adapter = pnob->adapter;
+
+	ether_setup(netdev);
+
+	netdev->open = &benet_open;
+	netdev->stop = &benet_close;
+	netdev->hard_start_xmit = &benet_xmit;
+
+	netdev->get_stats = &benet_get_stats;
+
+	netdev->set_multicast_list = &benet_set_multicast_list;
+
+	netdev->change_mtu = &benet_change_mtu;
+	netdev->set_mac_address = &benet_set_mac_addr;
+
+	netdev->vlan_rx_register = benet_vlan_register;
+	netdev->vlan_rx_add_vid = benet_vlan_add_vid;
+	netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
+
+	netdev->features =
+	    NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
+	    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
+
+	netdev->flags |= IFF_MULTICAST;
+
+	/* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
+	if (adapter->dma_64bit_cap)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+	return 0;
+}

+ 429 - 0
drivers/staging/benet/benet.h

@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef _BENET_H_
+#define _BENET_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/inet_lro.h>
+#include "hwlib.h"
+
+#define _SA_MODULE_NAME "net-driver"
+
+#define VLAN_VALID_BIT		0x8000
+#define BE_NUM_VLAN_SUPPORTED	32
+#define BE_PORT_LINK_DOWN       0000
+#define BE_PORT_LINK_UP         0001
+#define	BE_MAX_TX_FRAG_COUNT		(30)
+
+/* Flag bits for send operation */
+#define IPCS            (1 << 0)	/* Enable IP checksum offload */
+#define UDPCS           (1 << 1)	/* Enable UDP checksum offload */
+#define TCPCS           (1 << 2)	/* Enable TCP checksum offload */
+#define LSO             (1 << 3)	/* Enable Large Segment  offload */
+#define ETHVLAN         (1 << 4)	/* Enable VLAN insert */
+#define ETHEVENT        (1 << 5)	/* Generate  event on completion */
+#define ETHCOMPLETE     (1 << 6)	/* Generate completion when done */
+#define IPSEC           (1 << 7)	/* Enable IPSEC */
+#define FORWARD         (1 << 8)	/* Send the packet in forwarding path */
+#define FIN             (1 << 9)	/* Issue FIN segment */
+
+#define BE_MAX_MTU	8974
+
+#define BE_MAX_LRO_DESCRIPTORS			8
+#define BE_LRO_MAX_PKTS				64
+#define BE_MAX_FRAGS_PER_FRAME			6
+
+extern const char be_drvr_ver[];
+extern char be_fw_ver[];
+extern char be_driver_name[];
+
+extern struct ethtool_ops be_ethtool_ops;
+
+#define BE_DEV_STATE_NONE 0
+#define BE_DEV_STATE_INIT 1
+#define BE_DEV_STATE_OPEN 2
+#define BE_DEV_STATE_SUSPEND 3
+
+/* This structure is used to describe the physical fragments to use
+ * for DMAing data from the NIC.
+ */
+struct be_recv_buffer {
+	struct list_head rxb_list;	/* for maintaining a linked list */
+	void *rxb_va;		/* buffer virtual address */
+	u32 rxb_pa_lo;		/* low part of physical address */
+	u32 rxb_pa_hi;		/* high part of physical address */
+	u32 rxb_len;		/* length of recv buffer */
+	void *rxb_ctxt;		/* context for OSM driver to use */
+};
+
+/*
+ * fragment list to describe scattered data.
+ */
+struct be_tx_frag_list {
+	u32 txb_len;		/* Size of this fragment */
+	u32 txb_pa_lo;		/* Lower 32 bits of 64 bit physical addr */
+	u32 txb_pa_hi;		/* Higher 32 bits of 64 bit physical addr */
+};
+
+struct be_rx_page_info {
+	struct page *page;
+	dma_addr_t bus;
+	u16 page_offset;
+};
+
+/*
+ *  This structure is the main tracking structure for a NIC interface.
+ */
+struct be_net_object {
+	/* MCC Ring - used to send fwcmds to embedded ARM processor */
+	struct MCC_WRB_AMAP *mcc_q;	/* VA of the start of the ring */
+	u32 mcc_q_len;			/* # of WRB entries in this ring */
+	u32 mcc_q_size;
+	u32 mcc_q_hd;			/* MCC ring head */
+	u8 mcc_q_created;		/* flag to help cleanup */
+	struct be_mcc_object mcc_q_obj;	/* BECLIB's MCC ring Object */
+	dma_addr_t mcc_q_bus;		/* DMA'ble bus address */
+
+	/* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
+	struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
+	u32 mcc_cq_len;			/* # of compl. entries in this ring */
+	u32 mcc_cq_size;
+	u32 mcc_cq_tl;			/* compl. ring tail */
+	u8 mcc_cq_created;		/* flag to help cleanup */
+	struct be_cq_object mcc_cq_obj;	/* BECLIB's MCC compl. ring object */
+	u32 mcc_cq_id;			/* MCC ring ID */
+	dma_addr_t mcc_cq_bus;		/* DMA'ble bus address */
+
+	struct ring_desc mb_rd;		/* RD for MCC_MAIL_BOX */
+	void *mb_ptr;			/* mailbox ptr to be freed  */
+	dma_addr_t mb_bus;		/* DMA'ble bus address */
+	u32 mb_size;
+
+	/* BEClib uses an array of context objects to track outstanding
+	 * requests to the MCC.  We need to allocate the same number of
+	 * context entries as the number of entries in the MCC WRB ring.
+	 */
+	u32 mcc_wrb_ctxt_size;
+	void *mcc_wrb_ctxt;		/* pointer to the context area */
+	u32 mcc_wrb_ctxtLen;		/* Number of entries in the context */
+	/*
+	 * NIC send request ring - used for xmitting raw ether frames.
+	 */
+	struct ETH_WRB_AMAP *tx_q;	/* VA of the start of the ring */
+	u32 tx_q_len;			/* # of entries in the send ring */
+	u32 tx_q_size;
+	u32 tx_q_hd;			/* Head index. Next req. goes here */
+	u32 tx_q_tl;			/* Tail index; oldest outstanding req. */
+	u8 tx_q_created;		/* flag to help cleanup */
+	struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */
+	dma_addr_t tx_q_bus;		/* DMA'ble bus address */
+	u32 tx_q_id;			/* send queue ring ID */
+	u32 tx_q_port;			/* 0 no binding, 1 port A, 2 port B */
+	atomic_t tx_q_used;		/* # of WRBs used */
+	/* ptr to an array in which we store context info for each send req. */
+	void **tx_ctxt;
+	/*
+	 * NIC Send compl. ring - completion status for all NIC frames xmitted.
+	 */
+	struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */
+	u32 txcq_len;			/* # of entries in the ring */
+	u32 tx_cq_size;
+	/*
+	 * index into compl ring where the host expects next completion entry
+	 */
+	u32 tx_cq_tl;
+	u32 tx_cq_id;			/* completion queue id */
+	u8 tx_cq_created;		/* flag to help cleanup */
+	struct be_cq_object tx_cq_obj;
+	dma_addr_t tx_cq_bus;		/* DMA'ble bus address */
+	/*
+	 * Event Queue - all completion entries post events here.
+	 */
+	struct EQ_ENTRY_AMAP *event_q;	/* VA of start of event queue */
+	u32 event_q_len;		/* # of entries */
+	u32 event_q_size;
+	u32 event_q_tl;			/* Tail of the event queue */
+	u32 event_q_id;			/* Event queue ID */
+	u8 event_q_created;		/* flag to help cleanup */
+	struct be_eq_object event_q_obj; /* Queue handle */
+	dma_addr_t event_q_bus;		/* DMA'ble bus address */
+	/*
+	 * NIC receive queue - Data buffers to be used for receiving unicast,
+	 * broadcast and multi-cast frames  are posted here.
+	 */
+	struct ETH_RX_D_AMAP *rx_q;	/* VA of start of the queue */
+	u32 rx_q_len;			/* # of entries */
+	u32 rx_q_size;
+	u32 rx_q_hd;			/* Head of the queue */
+	atomic_t rx_q_posted;		/* number of posted buffers */
+	u32 rx_q_id;			/* queue ID */
+	u8 rx_q_created;		/* flag to help cleanup */
+	struct be_ethrq_object rx_q_obj;	/* NIC RX queue handle */
+	dma_addr_t rx_q_bus;		/* DMA'ble bus address */
+	/*
+	 * Pointer to an array of opaque context objects for use by the OSM
+	 * driver
+	 */
+	void **rx_ctxt;
+	/*
+	 * NIC unicast RX completion queue - all unicast ether frame completion
+	 * statuses from BE come here.
+	 */
+	struct ETH_RX_COMPL_AMAP *rx_cq;	/* VA of start of the queue */
+	u32 rx_cq_len;		/* # of entries */
+	u32 rx_cq_size;
+	u32 rx_cq_tl;			/* Tail of the queue */
+	u32 rx_cq_id;			/* queue ID */
+	u8 rx_cq_created;		/* flag to help cleanup */
+	struct be_cq_object rx_cq_obj;	/* queue handle */
+	dma_addr_t rx_cq_bus;		/* DMA'ble bus address */
+	struct be_function_object fn_obj;	/* function object   */
+	bool	fn_obj_created;
+	u32 rx_buf_size;		/* Size of the RX buffers */
+
+	struct net_device *netdev;
+	struct be_recv_buffer eth_rx_bufs[256];	/* to pass Rx buffer
+							   addresses */
+	struct be_adapter *adapter;	/* Pointer to OSM adapter */
+	u32 devno;		/* OSM, network dev no. */
+	u32 use_port;		/* Current active port */
+	struct be_rx_page_info *rx_page_info;	/* Array of Rx buf pages */
+	u32 rx_pg_info_hd;	/* Head of queue */
+	int rxbuf_post_fail;	/* RxBuff posting fail count */
+	bool rx_pg_shared;	/* Is an allocated page shared as two frags? */
+	struct vlan_group *vlan_grp;
+	u32 num_vlans;		/* Number of vlans in BE's filter */
+	u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
+	struct napi_struct napi;
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
+};
+
+#define NET_FH(np)       (&(np)->fn_obj)
+
+/*
+ * BE driver statistics.
+ */
+struct be_drvr_stat {
+	u32 bes_tx_reqs;	/* number of TX requests initiated */
+	u32 bes_tx_fails;	/* number of TX requests that failed */
+	u32 bes_fwd_reqs;	/* number of send reqs through forwarding i/f */
+	u32 bes_tx_wrbs;	/* number of tx WRBs used */
+
+	u32 bes_ints;		/* number of interrupts */
+	u32 bes_polls;		/* number of times NAPI called poll function */
+	u32 bes_events;		/* total event entries processed */
+	u32 bes_tx_events;	/* number of tx completion events  */
+	u32 bes_rx_events;	/* number of ucast rx completion events  */
+	u32 bes_tx_compl;	/* number of tx completion entries processed */
+	u32 bes_rx_compl;	/* number of rx completion entries
+				   processed */
+	u32 bes_ethrx_post_fail;	/* number of ethrx buffer alloc
+					   failures */
+	/*
+	 * number of non ether type II frames dropped where
+	 * frame len > length field of Mac Hdr
+	 */
+	u32 bes_802_3_dropped_frames;
+	/*
+	 * number of non ether type II frames malformed where
+	 * in frame len < length field of Mac Hdr
+	 */
+	u32 bes_802_3_malformed_frames;
+	u32 bes_ips;		/*  interrupts / sec */
+	u32 bes_prev_ints;	/* bes_ints at last IPS calculation  */
+	u16 bes_eth_tx_rate;	/*  ETH TX rate - Mb/sec */
+	u16 bes_eth_rx_rate;	/*  ETH RX rate - Mb/sec */
+	u32 bes_rx_coal;	/* Num pkts coalesced */
+	u32 bes_rx_flush;	/* Num times coalesced pkts were flushed */
+	u32 bes_link_change_physical;	/* Num of times physical link changed */
+	u32 bes_link_change_virtual;	/* Num of times virtual link changed */
+	u32 bes_rx_misc_pkts;	/* Misc pkts received */
+};
+
+/* Maximum interrupt delay (in microseconds) allowed */
+#define MAX_EQD				120
+
+/*
+ * timer context used to keep a system shutdown from hanging forever if
+ * the h/w stops responding
+ */
+struct be_timer_ctxt {
+	atomic_t get_stat_flag;
+	struct timer_list get_stats_timer;
+	unsigned long get_stat_sem_addr;
+};
+
+/* This structure is the main BladeEngine driver context.  */
+struct be_adapter {
+	struct net_device *netdevp;
+	struct be_drvr_stat be_stat;
+	struct net_device_stats benet_stats;
+
+	/* PCI BAR mapped addresses */
+	u8 __iomem *csr_va;	/* CSR */
+	u8 __iomem *db_va;	/* Door  Bell  */
+	u8 __iomem *pci_va;	/* PCI Config */
+
+	struct tasklet_struct sts_handler;
+	struct timer_list cq_timer;
+	spinlock_t int_lock;	/* to protect the isr field in adapter */
+
+	struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
+	/*
+	 * Controlled through ethtool: enables or disables use of the
+	 * RX checksum computed by BE.  If true, the checksum BE reports
+	 * for a received pkt is handed to the stack as-is; otherwise
+	 * the stack recalculates it.
+	 */
+	bool rx_csum;
+	/*
+	 * Controlled through ethtool: enables or disables coalescing of
+	 * RX pkts.  Coalescing is enabled when this is greater than 0
+	 * and less than 16, and disabled otherwise.
+	 */
+	u32 max_rx_coal;
+	struct pci_dev *pdev;	/* Pointer to OS's PCI device */
+
+	spinlock_t txq_lock;	/* to stop/wake queue based on tx_q_used */
+
+	u32 isr;		/* copy of Intr status reg. */
+
+	u32 port0_link_sts;	/* Port 0 link status */
+	u32 port1_link_sts;	/* Port 1 link status */
+	struct BE_LINK_STATUS *be_link_sts;
+
+	/* pointer to the first netobject of this adapter */
+	struct be_net_object *net_obj;
+
+	/*  Flags to indicate what to clean up */
+	bool tasklet_started;
+	bool isr_registered;
+	/*
+	 * adaptive interrupt coalescing (AIC) related
+	 */
+	bool enable_aic;	/* 1 if AIC is enabled */
+	u16 min_eqd;		/* minimum EQ delay in usec */
+	u16 max_eqd;		/* maximum EQ delay in usec */
+	u16 cur_eqd;		/* current EQ delay in usec */
+	/*
+	 * book keeping for interrupt / sec and TX/RX rate calculation
+	 */
+	ulong ips_jiffies;	/* jiffies at last IPS calc */
+	u32 eth_tx_bytes;
+	ulong eth_tx_jiffies;
+	u32 eth_rx_bytes;
+	ulong eth_rx_jiffies;
+
+	struct semaphore get_eth_stat_sem;
+
+	/* timer ctxt to prevent shutdown hanging due to an unresponsive BE */
+	struct be_timer_ctxt timer_ctxt;
+
+#define BE_MAX_MSIX_VECTORS             32
+#define BE_MAX_REQ_MSIX_VECTORS         1 /* only one EQ in Linux driver */
+	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
+	bool msix_enabled;
+	bool dma_64bit_cap;	/* whether the device is DAC capable */
+	u8 dev_state;	/* The current state of the device */
+	u8 dev_pm_state; /* The state of the device before suspend */
+};
+
+/*
+ * Every second we look at the ints/sec and adjust eq_delay
+ * between adapter->min_eqd and adapter->max_eqd to keep the ints/sec
+ * between IPS_LO_WM and IPS_HI_WM.
+ */
+#define IPS_HI_WM	18000
+#define IPS_LO_WM	8000
+
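+/*
+ * Example: at 25000 ints/sec with cur_eqd = 32 usec, update_eqd() steps
+ * the delay up by 8 to 40 usec; once the rate falls below 8000 ints/sec
+ * it steps back down by 8 per adjustment.
+ */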
+
+static inline void index_adv(u32 *index, u32 val,  u32 limit)
+{
+	BUG_ON(limit & (limit-1));
+	*index = (*index + val) & (limit - 1);
+}
+
+static inline void index_inc(u32 *index, u32 limit)
+{
+	BUG_ON(limit & (limit-1));
+	*index = (*index + 1) & (limit - 1);
+}
+
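+/*
+ * Example: with a 1024-entry ring, index_adv(&idx, 3, 1024) takes idx
+ * from 1022 to 1, since (1022 + 3) & 1023 == 1; the BUG_ON()s above
+ * enforce the power-of-two ring sizes this masking relies on.
+ */
+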
+static inline void be_adv_eq_tl(struct be_net_object *pnob)
+{
+	index_inc(&pnob->event_q_tl, pnob->event_q_len);
+}
+
+static inline void be_adv_txq_hd(struct be_net_object *pnob)
+{
+	index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
+}
+
+static inline void be_adv_txq_tl(struct be_net_object *pnob)
+{
+	index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
+}
+
+static inline void be_adv_txcq_tl(struct be_net_object *pnob)
+{
+	index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
+}
+
+static inline void be_adv_rxq_hd(struct be_net_object *pnob)
+{
+	index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
+}
+
+static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
+{
+	index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
+}
+
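+/*
+ * The WRB count that be_tx_wrb_info_remember() stored at the queue tail
+ * gives the index of the last WRB of the packet being completed.
+ */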
+static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
+{
+	return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
+		    & (pnob->tx_q_len - 1);
+}
+
+int benet_init(struct net_device *);
+int be_ethtool_ioctl(struct net_device *, struct ifreq *);
+struct net_device_stats *benet_get_stats(struct net_device *);
+void be_process_intr(unsigned long context);
+irqreturn_t be_int(int irq, void *dev);
+void be_post_eth_rx_buffs(struct be_net_object *);
+void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
+void be_get_stats_timer_handler(unsigned long);
+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
+void be_print_link_info(struct BE_LINK_STATUS *);
+void be_update_link_status(struct be_adapter *);
+void be_init_procfs(struct be_adapter *);
+void be_cleanup_procfs(struct be_adapter *);
+int be_poll(struct napi_struct *, int);
+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
+void be_notify_cmpl(struct be_net_object *, int, int, int);
+void be_enable_intr(struct be_net_object *);
+void be_enable_eq_intr(struct be_net_object *);
+void be_disable_intr(struct be_net_object *);
+void be_disable_eq_intr(struct be_net_object *);
+int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
+		    u8 *, mcc_wrb_cqe_callback, void *);
+int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
+
+#endif /* _BENET_H_ */

+ 103 - 0
drivers/staging/benet/bestatus.h

@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef _BESTATUS_H_
+#define _BESTATUS_H_
+
+#define BE_SUCCESS                      (0x00000000L)
+/*
+ * MessageId: BE_PENDING
+ *  The BladeEngine driver call succeeded, and the operation is pending.
+ */
+#define BE_PENDING                       (0x20070001L)
+#define BE_STATUS_PENDING                (BE_PENDING)
+/*
+ * MessageId: BE_NOT_OK
+ *  An error occurred.
+ */
+#define BE_NOT_OK                        (0xE0070002L)
+/*
+ * MessageId: BE_STATUS_SYSTEM_RESOURCES
+ *  Insufficient host system resources exist to complete the API.
+ */
+#define BE_STATUS_SYSTEM_RESOURCES       (0xE0070003L)
+/*
+ * MessageId: BE_STATUS_CHIP_RESOURCES
+ *  Insufficient chip resources exist to complete the API.
+ */
+#define BE_STATUS_CHIP_RESOURCES         (0xE0070004L)
+/*
+ * MessageId: BE_STATUS_NO_RESOURCE
+ *  Insufficient resources to complete request.
+ */
+#define BE_STATUS_NO_RESOURCE            (0xE0070005L)
+/*
+ * MessageId: BE_STATUS_BUSY
+ *  Resource is currently busy.
+ */
+#define BE_STATUS_BUSY                   (0xE0070006L)
+/*
+ * MessageId: BE_STATUS_INVALID_PARAMETER
+ *  Invalid Parameter in request.
+ */
+#define BE_STATUS_INVALID_PARAMETER      (0xE0000007L)
+/*
+ * MessageId: BE_STATUS_NOT_SUPPORTED
+ *  Requested operation is not supported.
+ */
+#define BE_STATUS_NOT_SUPPORTED          (0xE000000DL)
+
+/*
+ * ***************************************************************************
+ *                     E T H E R N E T   S T A T U S
+ * ***************************************************************************
+ */
+
+/*
+ * MessageId: BE_ETH_TX_ERROR
+ *  The Ethernet device driver failed to transmit a packet.
+ */
+#define BE_ETH_TX_ERROR                  (0xE0070101L)
+
+/*
+ * ***************************************************************************
+ *                     S H A R E D   S T A T U S
+ * ***************************************************************************
+ */
+
+/*
+ * MessageId: BE_STATUS_VBD_INVALID_VERSION
+ *  The device driver is not compatible with this version of the VBD.
+ */
+#define BE_STATUS_INVALID_VERSION    (0xE0070402L)
+/*
+ * MessageId: BE_STATUS_DOMAIN_DENIED
+ *  The operation failed to complete due to insufficient access
+ *  rights for the requesting domain.
+ */
+#define BE_STATUS_DOMAIN_DENIED          (0xE0070403L)
+/*
+ * MessageId: BE_STATUS_TCP_NOT_STARTED
+ *  The embedded TCP/IP stack has not been started.
+ */
+#define BE_STATUS_TCP_NOT_STARTED        (0xE0070409L)
+/*
+ * MessageId: BE_STATUS_NO_MCC_WRB
+ *  No free MCC WRB are available for posting the request.
+ */
+#define BE_STATUS_NO_MCC_WRB                 (0xE0070414L)
+
+#endif /* _BESTATUS_H_ */

+ 243 - 0
drivers/staging/benet/cev.h

@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __cev_amap_h__
+#define __cev_amap_h__
+#include "ep.h"
+
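+/*
+ * AMAP convention used throughout these headers: each BE_*_AMAP struct
+ * documents a register layout with one u8 member (or u8 array) per
+ * bitfield, where the array length is the field's width in bits.  The
+ * companion *_AMAP struct with a u32 dw[] array is what is actually
+ * read and written, through the AMAP_{GET,SET}_BITS_PTR() accessors.
+ */
+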
+/*
+ * Host Interrupt Status Register 0. The first of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ0 through EQ31.
+ */
+struct BE_CEV_ISR0_CSR_AMAP {
+	u8 interrupt0;	/* DWORD 0 */
+	u8 interrupt1;	/* DWORD 0 */
+	u8 interrupt2;	/* DWORD 0 */
+	u8 interrupt3;	/* DWORD 0 */
+	u8 interrupt4;	/* DWORD 0 */
+	u8 interrupt5;	/* DWORD 0 */
+	u8 interrupt6;	/* DWORD 0 */
+	u8 interrupt7;	/* DWORD 0 */
+	u8 interrupt8;	/* DWORD 0 */
+	u8 interrupt9;	/* DWORD 0 */
+	u8 interrupt10;	/* DWORD 0 */
+	u8 interrupt11;	/* DWORD 0 */
+	u8 interrupt12;	/* DWORD 0 */
+	u8 interrupt13;	/* DWORD 0 */
+	u8 interrupt14;	/* DWORD 0 */
+	u8 interrupt15;	/* DWORD 0 */
+	u8 interrupt16;	/* DWORD 0 */
+	u8 interrupt17;	/* DWORD 0 */
+	u8 interrupt18;	/* DWORD 0 */
+	u8 interrupt19;	/* DWORD 0 */
+	u8 interrupt20;	/* DWORD 0 */
+	u8 interrupt21;	/* DWORD 0 */
+	u8 interrupt22;	/* DWORD 0 */
+	u8 interrupt23;	/* DWORD 0 */
+	u8 interrupt24;	/* DWORD 0 */
+	u8 interrupt25;	/* DWORD 0 */
+	u8 interrupt26;	/* DWORD 0 */
+	u8 interrupt27;	/* DWORD 0 */
+	u8 interrupt28;	/* DWORD 0 */
+	u8 interrupt29;	/* DWORD 0 */
+	u8 interrupt30;	/* DWORD 0 */
+	u8 interrupt31;	/* DWORD 0 */
+} __packed;
+struct CEV_ISR0_CSR_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Host Interrupt Status Register 1. The second of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ32 through EQ63.
+ */
+struct BE_CEV_ISR1_CSR_AMAP {
+	u8 interrupt32;	/* DWORD 0 */
+	u8 interrupt33;	/* DWORD 0 */
+	u8 interrupt34;	/* DWORD 0 */
+	u8 interrupt35;	/* DWORD 0 */
+	u8 interrupt36;	/* DWORD 0 */
+	u8 interrupt37;	/* DWORD 0 */
+	u8 interrupt38;	/* DWORD 0 */
+	u8 interrupt39;	/* DWORD 0 */
+	u8 interrupt40;	/* DWORD 0 */
+	u8 interrupt41;	/* DWORD 0 */
+	u8 interrupt42;	/* DWORD 0 */
+	u8 interrupt43;	/* DWORD 0 */
+	u8 interrupt44;	/* DWORD 0 */
+	u8 interrupt45;	/* DWORD 0 */
+	u8 interrupt46;	/* DWORD 0 */
+	u8 interrupt47;	/* DWORD 0 */
+	u8 interrupt48;	/* DWORD 0 */
+	u8 interrupt49;	/* DWORD 0 */
+	u8 interrupt50;	/* DWORD 0 */
+	u8 interrupt51;	/* DWORD 0 */
+	u8 interrupt52;	/* DWORD 0 */
+	u8 interrupt53;	/* DWORD 0 */
+	u8 interrupt54;	/* DWORD 0 */
+	u8 interrupt55;	/* DWORD 0 */
+	u8 interrupt56;	/* DWORD 0 */
+	u8 interrupt57;	/* DWORD 0 */
+	u8 interrupt58;	/* DWORD 0 */
+	u8 interrupt59;	/* DWORD 0 */
+	u8 interrupt60;	/* DWORD 0 */
+	u8 interrupt61;	/* DWORD 0 */
+	u8 interrupt62;	/* DWORD 0 */
+	u8 interrupt63;	/* DWORD 0 */
+} __packed;
+struct CEV_ISR1_CSR_AMAP {
+	u32 dw[1];
+};
+/*
+ * Host Interrupt Status Register 2. The third of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ64 through EQ95.
+ */
+struct BE_CEV_ISR2_CSR_AMAP {
+	u8 interrupt64;	/* DWORD 0 */
+	u8 interrupt65;	/* DWORD 0 */
+	u8 interrupt66;	/* DWORD 0 */
+	u8 interrupt67;	/* DWORD 0 */
+	u8 interrupt68;	/* DWORD 0 */
+	u8 interrupt69;	/* DWORD 0 */
+	u8 interrupt70;	/* DWORD 0 */
+	u8 interrupt71;	/* DWORD 0 */
+	u8 interrupt72;	/* DWORD 0 */
+	u8 interrupt73;	/* DWORD 0 */
+	u8 interrupt74;	/* DWORD 0 */
+	u8 interrupt75;	/* DWORD 0 */
+	u8 interrupt76;	/* DWORD 0 */
+	u8 interrupt77;	/* DWORD 0 */
+	u8 interrupt78;	/* DWORD 0 */
+	u8 interrupt79;	/* DWORD 0 */
+	u8 interrupt80;	/* DWORD 0 */
+	u8 interrupt81;	/* DWORD 0 */
+	u8 interrupt82;	/* DWORD 0 */
+	u8 interrupt83;	/* DWORD 0 */
+	u8 interrupt84;	/* DWORD 0 */
+	u8 interrupt85;	/* DWORD 0 */
+	u8 interrupt86;	/* DWORD 0 */
+	u8 interrupt87;	/* DWORD 0 */
+	u8 interrupt88;	/* DWORD 0 */
+	u8 interrupt89;	/* DWORD 0 */
+	u8 interrupt90;	/* DWORD 0 */
+	u8 interrupt91;	/* DWORD 0 */
+	u8 interrupt92;	/* DWORD 0 */
+	u8 interrupt93;	/* DWORD 0 */
+	u8 interrupt94;	/* DWORD 0 */
+	u8 interrupt95;	/* DWORD 0 */
+} __packed;
+struct CEV_ISR2_CSR_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Host Interrupt Status Register 3. The fourth of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ96 through EQ127.
+ */
+struct BE_CEV_ISR3_CSR_AMAP {
+	u8 interrupt96;	/* DWORD 0 */
+	u8 interrupt97;	/* DWORD 0 */
+	u8 interrupt98;	/* DWORD 0 */
+	u8 interrupt99;	/* DWORD 0 */
+	u8 interrupt100;	/* DWORD 0 */
+	u8 interrupt101;	/* DWORD 0 */
+	u8 interrupt102;	/* DWORD 0 */
+	u8 interrupt103;	/* DWORD 0 */
+	u8 interrupt104;	/* DWORD 0 */
+	u8 interrupt105;	/* DWORD 0 */
+	u8 interrupt106;	/* DWORD 0 */
+	u8 interrupt107;	/* DWORD 0 */
+	u8 interrupt108;	/* DWORD 0 */
+	u8 interrupt109;	/* DWORD 0 */
+	u8 interrupt110;	/* DWORD 0 */
+	u8 interrupt111;	/* DWORD 0 */
+	u8 interrupt112;	/* DWORD 0 */
+	u8 interrupt113;	/* DWORD 0 */
+	u8 interrupt114;	/* DWORD 0 */
+	u8 interrupt115;	/* DWORD 0 */
+	u8 interrupt116;	/* DWORD 0 */
+	u8 interrupt117;	/* DWORD 0 */
+	u8 interrupt118;	/* DWORD 0 */
+	u8 interrupt119;	/* DWORD 0 */
+	u8 interrupt120;	/* DWORD 0 */
+	u8 interrupt121;	/* DWORD 0 */
+	u8 interrupt122;	/* DWORD 0 */
+	u8 interrupt123;	/* DWORD 0 */
+	u8 interrupt124;	/* DWORD 0 */
+	u8 interrupt125;	/* DWORD 0 */
+	u8 interrupt126;	/* DWORD 0 */
+	u8 interrupt127;	/* DWORD 0 */
+} __packed;
+struct CEV_ISR3_CSR_AMAP {
+	u32 dw[1];
+};
+
+/*  Completions and Events block Registers.  */
+struct BE_CEV_CSRMAP_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[32];	/* DWORD 1 */
+	u8 rsvd2[32];	/* DWORD 2 */
+	u8 rsvd3[32];	/* DWORD 3 */
+	struct BE_CEV_ISR0_CSR_AMAP isr0;
+	struct BE_CEV_ISR1_CSR_AMAP isr1;
+	struct BE_CEV_ISR2_CSR_AMAP isr2;
+	struct BE_CEV_ISR3_CSR_AMAP isr3;
+	u8 rsvd4[32];	/* DWORD 8 */
+	u8 rsvd5[32];	/* DWORD 9 */
+	u8 rsvd6[32];	/* DWORD 10 */
+	u8 rsvd7[32];	/* DWORD 11 */
+	u8 rsvd8[32];	/* DWORD 12 */
+	u8 rsvd9[32];	/* DWORD 13 */
+	u8 rsvd10[32];	/* DWORD 14 */
+	u8 rsvd11[32];	/* DWORD 15 */
+	u8 rsvd12[32];	/* DWORD 16 */
+	u8 rsvd13[32];	/* DWORD 17 */
+	u8 rsvd14[32];	/* DWORD 18 */
+	u8 rsvd15[32];	/* DWORD 19 */
+	u8 rsvd16[32];	/* DWORD 20 */
+	u8 rsvd17[32];	/* DWORD 21 */
+	u8 rsvd18[32];	/* DWORD 22 */
+	u8 rsvd19[32];	/* DWORD 23 */
+	u8 rsvd20[32];	/* DWORD 24 */
+	u8 rsvd21[32];	/* DWORD 25 */
+	u8 rsvd22[32];	/* DWORD 26 */
+	u8 rsvd23[32];	/* DWORD 27 */
+	u8 rsvd24[32];	/* DWORD 28 */
+	u8 rsvd25[32];	/* DWORD 29 */
+	u8 rsvd26[32];	/* DWORD 30 */
+	u8 rsvd27[32];	/* DWORD 31 */
+	u8 rsvd28[32];	/* DWORD 32 */
+	u8 rsvd29[32];	/* DWORD 33 */
+	u8 rsvd30[192];	/* DWORD 34 */
+	u8 rsvd31[192];	/* DWORD 40 */
+	u8 rsvd32[160];	/* DWORD 46 */
+	u8 rsvd33[160];	/* DWORD 51 */
+	u8 rsvd34[160];	/* DWORD 56 */
+	u8 rsvd35[96];	/* DWORD 61 */
+	u8 rsvd36[192][32];	/* DWORD 64 */
+} __packed;
+struct CEV_CSRMAP_AMAP {
+	u32 dw[256];
+};
+
+#endif /* __cev_amap_h__ */

+ 211 - 0
drivers/staging/benet/cq.c

@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+
+/*
+ * Completion Queue Objects
+ */
+/*
+ *============================================================================
+ *                  P U B L I C  R O U T I N E S
+ *============================================================================
+ */
+
+/*
+    This routine creates a completion queue based on the client completion
+    queue configuration information.
+
+
+    FunctionObject      - Handle to a function object
+    CqBaseVa            - Base VA for the CQ ring
+    NumEntries          - CEV_CQ_CNT_* values
+    solEventEnable      - 0 = All CQEs can generate Events if CQ is eventable
+			1 = only CQEs with solicited bit set are eventable
+    eventable           - Eventable CQ, generates interrupts.
+    nodelay             - 1 = Force interrupt, relevant if CQ eventable.
+			Interrupt is asserted immediately after EQE
+			write is confirmed, regardless of EQ Timer
+			or watermark settings.
+    wme                 - Enable watermark based coalescing
+    wmThresh            - High watermark (CQ fullness at which an event
+			or interrupt should be asserted).  These are the
+			CEV_WATERMARK encoded values.
+    EqObject            - EQ Handle to assign to this CQ
+    ppCqObject          - Internal CQ Handle returned.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code is
+	returned.
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+int be_cq_create(struct be_function_object *pfob,
+	struct ring_desc *rd, u32 length, bool solicited_eventable,
+	bool no_delay, u32 wm_thresh,
+	struct be_eq_object *eq_object, struct be_cq_object *cq_object)
+{
+	int status = BE_SUCCESS;
+	u32 num_entries_encoding;
+	u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
+	struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	u32 n;
+	unsigned long irql;
+
+	ASSERT(rd);
+	ASSERT(cq_object);
+	ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);
+
+	switch (num_entries) {
+	case 256:
+		num_entries_encoding = CEV_CQ_CNT_256;
+		break;
+	case 512:
+		num_entries_encoding = CEV_CQ_CNT_512;
+		break;
+	case 1024:
+		num_entries_encoding = CEV_CQ_CNT_1024;
+		break;
+	default:
+		ASSERT(0);
+		return BE_STATUS_INVALID_PARAMETER;
+	}
+
+	/*
+	 * All CQ entries are the same size.  Use the iSCSI version
+	 * as a test for the proper rd length.
+	 */
+	memset(cq_object, 0, sizeof(*cq_object));
+
+	atomic_set(&cq_object->ref_count, 0);
+	cq_object->parent_function = pfob;
+	cq_object->eq_object = eq_object;
+	cq_object->num_entries = num_entries;
+	/* save for MCC cq processing */
+	cq_object->va = rd->va;
+
+	/* map into UT. */
+	length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in create CQ.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);
+
+	fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
+									length);
+
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
+	n = pfob->pci_function_number;
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);
+
+	n = (eq_object != NULL);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
+				&fwcmd->params.request.context, n);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);
+
+	n = eq_object ? eq_object->eq_id : 0;
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
+			&fwcmd->params.request.context, num_entries_encoding);
+
+	n = 0; /* Protection Domain is always 0 in  Linux  driver */
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
+				&fwcmd->params.request.context, no_delay);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
+			&fwcmd->params.request.context, solicited_eventable);
+
+	n = (wm_thresh != 0xFFFFFFFF);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);
+
+	n = (n ? wm_thresh : 0);
+	AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
+				&fwcmd->params.request.context, n);
+	/* Create a page list for the FWCMD. */
+	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+			  ARRAY_SIZE(fwcmd->params.request.pages));
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+			NULL, NULL, fwcmd, NULL);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "MCC to create CQ failed.");
+		goto Error;
+	}
+	/* Remember the CQ id. */
+	cq_object->cq_id = fwcmd->params.response.cq_id;
+
+	/* insert this cq into eq_object reference */
+	if (eq_object) {
+		atomic_inc(&eq_object->ref_count);
+		list_add_tail(&cq_object->cqlist_for_eq,
+					&eq_object->cq_list_head);
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
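+/*
+ * Typical use (a sketch; ring_desc setup and error handling omitted, and
+ * eq_obj/cq_obj are hypothetical caller-owned objects): create a
+ * 256-entry eventable CQ bound to an existing EQ, watermark disabled:
+ *
+ *	status = be_cq_create(pfob, &rd,
+ *			      256 * sizeof(struct MCC_CQ_ENTRY_AMAP),
+ *			      false, false, 0xFFFFFFFF, eq_obj, cq_obj);
+ */
+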
+/*
+
+    Dereferences the given object.  Once the object's reference count drops to
+    zero, the object is destroyed and all resources that are held by this object
+    are released.  The on-chip context is also destroyed along with the queue
+    ID, and any mappings made into the UT.
+
+    cq_object            - CQ handle returned from cq_object_create.
+
+    Returns BE_SUCCESS.
+
+    IRQL: IRQL < DISPATCH_LEVEL
+*/
+int be_cq_destroy(struct be_cq_object *cq_object)
+{
+	int status = 0;
+
+	/* Nothing should reference this CQ at this point. */
+	ASSERT(atomic_read(&cq_object->ref_count) == 0);
+
+	/* Send fwcmd to destroy the CQ. */
+	status = be_function_ring_destroy(cq_object->parent_function,
+		     cq_object->cq_id, FWCMD_RING_TYPE_CQ,
+					NULL, NULL, NULL, NULL);
+	ASSERT(status == 0);
+
+	/* Remove reference if this is an eventable CQ. */
+	if (cq_object->eq_object) {
+		atomic_dec(&cq_object->eq_object->ref_count);
+		list_del(&cq_object->cqlist_for_eq);
+	}
+	return BE_SUCCESS;
+}
+

+ 71 - 0
drivers/staging/benet/descriptors.h

@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __descriptors_amap_h__
+#define __descriptors_amap_h__
+
+/*
+ * --- IPC_NODE_ID_ENUM ---
+ * IPC processor id values
+ */
+#define TPOST_NODE_ID                   (0)	/* TPOST ID */
+#define TPRE_NODE_ID                    (1)	/* TPRE ID */
+#define TXULP0_NODE_ID                  (2)	/* TXULP0 ID */
+#define TXULP1_NODE_ID                  (3)	/* TXULP1 ID */
+#define TXULP2_NODE_ID                  (4)	/* TXULP2 ID */
+#define RXULP0_NODE_ID                  (5)	/* RXULP0 ID */
+#define RXULP1_NODE_ID                  (6)	/* RXULP1 ID */
+#define RXULP2_NODE_ID                  (7)	/* RXULP2 ID */
+#define MPU_NODE_ID                     (15)	/* MPU ID */
+
+/*
+ * --- MAC_ID_ENUM ---
+ * Meaning of the mac_id field in rxpp_eth_d
+ */
+#define PORT0_HOST_MAC0    (0)  /* PD 0, Port 0, host networking, MAC 0. */
+#define PORT0_HOST_MAC1    (1)	/* PD 0, Port 0, host networking, MAC 1. */
+#define PORT0_STORAGE_MAC0 (2)	/* PD 0, Port 0, host storage, MAC 0. */
+#define PORT0_STORAGE_MAC1 (3)	/* PD 0, Port 0, host storage, MAC 1. */
+#define PORT1_HOST_MAC0    (4)	/* PD 0, Port 1 host networking, MAC 0. */
+#define PORT1_HOST_MAC1    (5)	/* PD 0, Port 1 host networking, MAC 1. */
+#define PORT1_STORAGE_MAC0 (6)	/* PD 0, Port 1 host storage, MAC 0. */
+#define PORT1_STORAGE_MAC1 (7)	/* PD 0, Port 1 host storage, MAC 1. */
+#define FIRST_VM_MAC       (8)	/* PD 1 MAC. Protection domains have IDs */
+				/* from 0x8-0x26, one per PD. */
+#define LAST_VM_MAC        (38)	/* PD 31 MAC. */
+#define MGMT_MAC           (39)	/* Management port MAC. */
+#define MARBLE_MAC0        (59)	/* Used for flushing function 0 receive */
+				  /*
+				   * queues before re-using a torn-down
+				   * receive ring. the DA =
+				   * 00-00-00-00-00-00, and the MSB of the
+				   * SA = 00
+				   */
+#define MARBLE_MAC1        (60)	/* Used for flushing function 1 receive */
+				  /*
+				   * queues before re-using a torn-down
+				   * receive ring. the DA =
+				   * 00-00-00-00-00-00, and the MSB of the
+				   * SA != 00
+				   */
+#define NULL_MAC           (61)	/* Promiscuous mode, indicates no match */
+#define MCAST_MAC          (62)	/* Multicast match. */
+#define BCAST_MATCH        (63)	/* Broadcast match. */
+
+#endif /* __descriptors_amap_h__ */

+ 179 - 0
drivers/staging/benet/doorbells.h

@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __doorbells_amap_h__
+#define __doorbells_amap_h__
+
+/* The TX/RDMA send queue doorbell. */
+struct BE_SQ_DB_AMAP {
+	u8 cid[11];		/* DWORD 0 */
+	u8 rsvd0[5];	/* DWORD 0 */
+	u8 numPosted[14];	/* DWORD 0 */
+	u8 rsvd1[2];	/* DWORD 0 */
+} __packed;
+struct SQ_DB_AMAP {
+	u32 dw[1];
+};
+
+/* The receive queue doorbell. */
+struct BE_RQ_DB_AMAP {
+	u8 rq[10];		/* DWORD 0 */
+	u8 rsvd0[13];	/* DWORD 0 */
+	u8 Invalidate;	/* DWORD 0 */
+	u8 numPosted[8];	/* DWORD 0 */
+} __packed;
+struct RQ_DB_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * The CQ/EQ doorbell. Software MUST set reserved fields in this
+ * descriptor to zero, otherwise (CEV) hardware will not execute the
+ * doorbell (flagging a bad_db_qid error instead).
+ */
+struct BE_CQ_DB_AMAP {
+	u8 qid[10];		/* DWORD 0 */
+	u8 rsvd0[4];	/* DWORD 0 */
+	u8 rearm;		/* DWORD 0 */
+	u8 event;		/* DWORD 0 */
+	u8 num_popped[13];	/* DWORD 0 */
+	u8 rsvd1[3];	/* DWORD 0 */
+} __packed;
+struct CQ_DB_AMAP {
+	u32 dw[1];
+};
+
+struct BE_TPM_RQ_DB_AMAP {
+	u8 qid[10];		/* DWORD 0 */
+	u8 rsvd0[6];	/* DWORD 0 */
+	u8 numPosted[11];	/* DWORD 0 */
+	u8 mss_cnt[5];	/* DWORD 0 */
+} __packed;
+struct TPM_RQ_DB_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Post WRB Queue Doorbell Register used by the host Storage stack
+ * to notify the controller of a posted Work Request Block
+ */
+struct BE_WRB_POST_DB_AMAP {
+	u8 wrb_cid[10];	/* DWORD 0 */
+	u8 rsvd0[6];	/* DWORD 0 */
+	u8 wrb_index[8];	/* DWORD 0 */
+	u8 numberPosted[8];	/* DWORD 0 */
+} __packed;
+struct WRB_POST_DB_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Update Default PDU Queue Doorbell Register used to communicate
+ * to the controller that the driver has stopped processing the queue
+ * and where in the queue it stopped, this is
+ * a CQ Entry Type. Used by storage driver.
+ */
+struct BE_DEFAULT_PDU_DB_AMAP {
+	u8 qid[10];		/* DWORD 0 */
+	u8 rsvd0[4];	/* DWORD 0 */
+	u8 rearm;		/* DWORD 0 */
+	u8 event;		/* DWORD 0 */
+	u8 cqproc[14];	/* DWORD 0 */
+	u8 rsvd1[2];	/* DWORD 0 */
+} __packed;
+struct DEFAULT_PDU_DB_AMAP {
+	u32 dw[1];
+};
+
+/* Management Command and Controller default fragment ring */
+struct BE_MCC_DB_AMAP {
+	u8 rid[11];		/* DWORD 0 */
+	u8 rsvd0[5];	/* DWORD 0 */
+	u8 numPosted[14];	/* DWORD 0 */
+	u8 rsvd1[2];	/* DWORD 0 */
+} __packed;
+struct MCC_DB_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * Used for bootstrapping the Host interface. This register is
+ * used for driver communication with the MPU when no MCC Rings exist.
+ * The software must write this register twice to post any MCC
+ * command. First, it writes the register with hi=1 and the upper bits of
+ * the physical address for the MCC_MAILBOX structure.  Software must poll
+ * the ready bit until this is acknowledged.  Then, software writes the
+ * register with hi=0 and the lower bits of the address.  It must
+ * poll the ready bit until the MCC command is complete.  Upon completion,
+ * the MCC_MAILBOX will contain a valid completion queue entry.
+ */
+struct BE_MPU_MAILBOX_DB_AMAP {
+	u8 ready;		/* DWORD 0 */
+	u8 hi;		/* DWORD 0 */
+	u8 address[30];	/* DWORD 0 */
+} __packed;
+struct MPU_MAILBOX_DB_AMAP {
+	u32 dw[1];
+};
+
+/*
+ *  This is the protection domain doorbell register map. Note that
+ *  while this map shows doorbells for all Blade Engine supported
+ *  protocols, not all of these may be valid in a given function or
+ *  protection domain. It is the responsibility of the application
+ *  accessing the doorbells to know which are valid. Each doorbell
+ *  occupies 32 bytes of space, but unless otherwise specified,
+ *  only the first 4 bytes should be written.  There are 32 instances
+ *  of these doorbells: one for the host and one for each of the 31
+ *  virtual machines. The host and each VM map only the doorbell pages
+ *  belonging to their own protection domain; one VM cannot touch the
+ *  doorbells of another. The doorbells are the only registers directly accessible
+ *  by a virtual machine. Similarly, there are 511 additional
+ *  doorbells for RDMA protection domains. PD 0 for RDMA shares
+ *  the same physical protection domain doorbell page as ETH/iSCSI.
+ *
+ */
+struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
+	u8 rsvd0[512];	/* DWORD 0 */
+	struct BE_SQ_DB_AMAP rdma_sq_db;
+	u8 rsvd1[7][32];	/* DWORD 17 */
+	struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
+	u8 rsvd2[7][32];	/* DWORD 25 */
+	struct BE_SQ_DB_AMAP etx_sq_db;
+	u8 rsvd3[7][32];	/* DWORD 33 */
+	struct BE_RQ_DB_AMAP rdma_rq_db;
+	u8 rsvd4[7][32];	/* DWORD 41 */
+	struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
+	u8 rsvd5[7][32];	/* DWORD 49 */
+	struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
+	u8 rsvd6[7][32];	/* DWORD 57 */
+	struct BE_RQ_DB_AMAP erx_rq_db;
+	u8 rsvd7[7][32];	/* DWORD 65 */
+	struct BE_CQ_DB_AMAP cq_db;
+	u8 rsvd8[7][32];	/* DWORD 73 */
+	struct BE_MCC_DB_AMAP mpu_mcc_db;
+	u8 rsvd9[7][32];	/* DWORD 81 */
+	struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
+	u8 rsvd10[935][32];	/* DWORD 89 */
+} __packed;
+struct PROTECTION_DOMAIN_DBMAP_AMAP {
+	u32 dw[1024];
+};
+
+#endif /* __doorbells_amap_h__ */

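The BE_MPU_MAILBOX_DB_AMAP comment above describes a two-write bootstrap protocol. A sketch of what a caller does, assuming the bit packing used by later BladeEngine drivers (address bits 34-63 in the first write, bits 4-33 in the second, so the mailbox must be 16-byte aligned); the helper name and timeout policy are this example's assumptions, not part of this patch:

	/* Sketch only: post a bootstrap mailbox command.  Bit 0 of the
	 * register is "ready", bit 1 is "hi", bits 2-31 carry address
	 * bits (packing is an assumption, see the note above). */
	static int mpu_mailbox_post(void __iomem *db, u64 mbox_pa)
	{
		u32 val = 1 << 1;			/* hi = 1 */
		int i;

		val |= (upper_32_bits(mbox_pa) >> 2) << 2; /* pa bits 34-63 */
		iowrite32(val, db);
		for (i = 0; !(ioread32(db) & 1); i++) {	/* poll ready */
			if (i > 100000)
				return -1;	/* timed out */
			udelay(1);
		}

		val = ((u32)(mbox_pa >> 4)) << 2;	/* hi = 0, pa bits 4-33 */
		iowrite32(val, db);
		for (i = 0; !(ioread32(db) & 1); i++) {
			if (i > 100000)
				return -1;	/* timed out */
			udelay(1);
		}
		return 0;
	}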
+ 66 - 0
drivers/staging/benet/ep.h

@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __ep_amap_h__
+#define __ep_amap_h__
+
+/* General Control and Status Register. */
+struct BE_EP_CONTROL_CSR_AMAP {
+	u8 m0_RxPbuf;	/* DWORD 0 */
+	u8 m1_RxPbuf;	/* DWORD 0 */
+	u8 m2_RxPbuf;	/* DWORD 0 */
+	u8 ff_en;		/* DWORD 0 */
+	u8 rsvd0[27];	/* DWORD 0 */
+	u8 CPU_reset;	/* DWORD 0 */
+} __packed;
+struct EP_CONTROL_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Semaphore Register. */
+struct BE_EP_SEMAPHORE_CSR_AMAP {
+	u8 value[32];	/* DWORD 0 */
+} __packed;
+struct EP_SEMAPHORE_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Embedded Processor Specific Registers. */
+struct BE_EP_CSRMAP_AMAP {
+	struct BE_EP_CONTROL_CSR_AMAP ep_control;
+	u8 rsvd0[32];	/* DWORD 1 */
+	u8 rsvd1[32];	/* DWORD 2 */
+	u8 rsvd2[32];	/* DWORD 3 */
+	u8 rsvd3[32];	/* DWORD 4 */
+	u8 rsvd4[32];	/* DWORD 5 */
+	u8 rsvd5[8][128];	/* DWORD 6 */
+	u8 rsvd6[32];	/* DWORD 38 */
+	u8 rsvd7[32];	/* DWORD 39 */
+	u8 rsvd8[32];	/* DWORD 40 */
+	u8 rsvd9[32];	/* DWORD 41 */
+	u8 rsvd10[32];	/* DWORD 42 */
+	struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
+	u8 rsvd11[32];	/* DWORD 44 */
+	u8 rsvd12[19][32];	/* DWORD 45 */
+} __packed;
+struct EP_CSRMAP_AMAP {
+	u32 dw[64];
+};
+
+#endif /* __ep_amap_h__ */

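In these autogenerated headers, each u8 member of a BE_*_AMAP struct stands for a field of that many bits (so EP_SEMAPHORE's value[32] is one full DWORD), while the paired *_AMAP struct gives the structure's real size in DWORDs. The AMAP_SET_BITS_PTR/AMAP_GET_BITS_PTR accessors used in the .c files of this series read and write these fields by bit offset; a rough sketch of what such a read reduces to (the real macros are defined elsewhere in this series, so treat this as illustrative):

	/* Illustrative field read by bit offset and width.  Fields that
	 * span a 32-bit word boundary need extra handling, omitted here. */
	static u32 amap_get_bits(const u32 *dw, unsigned int bit_offset,
				 unsigned int width)
	{
		u32 word = dw[bit_offset / 32];
		u32 mask = (width == 32) ? ~0u : ((1u << width) - 1);

		return (word >> (bit_offset % 32)) & mask;
	}

	/* e.g. the 32-bit semaphore value of an EP_SEMAPHORE_CSR_AMAP: */
	static u32 ep_semaphore_value(const struct EP_SEMAPHORE_CSR_AMAP *csr)
	{
		return amap_get_bits(csr->dw, 0, 32);
	}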
+ 299 - 0
drivers/staging/benet/eq.c

@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+/*
+    This routine creates an event queue based on the client completion
+    queue configuration information.
+
+    FunctionObject      - Handle to a function object
+    EqBaseVa            - Base VA for the EQ ring
+    SizeEncoding        - The encoded size for the EQ entries. This value is
+			either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
+    NumEntries          - CEV_EQ_CNT_* values.
+    Watermark           - Enables watermark based coalescing.  This parameter
+			must be of the type CEV_WMARK_* if watermarks
+			are enabled.  If watermarks are to be disabled,
+			this value should be -1.
+    TimerDelay          - If a timer delay is enabled this value should be the
+			time of the delay in 8 microsecond units.  If
+			delays are not used this parameter should be
+			set to -1.
+    ppEqObject          - Internal EQ Handle returned.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+	is returned.
+
+    IRQL < DISPATCH_LEVEL
+*/
+int
+be_eq_create(struct be_function_object *pfob,
+		struct ring_desc *rd, u32 eqe_size, u32 num_entries,
+		u32 watermark,	/* CEV_WMARK_* or -1 */
+		u32 timer_delay,	/* in 8us units, or -1 */
+		struct be_eq_object *eq_object)
+{
+	int status = BE_SUCCESS;
+	u32 num_entries_encoding, eqe_size_encoding, length;
+	struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	u32 n;
+	unsigned long irql;
+
+	ASSERT(rd);
+	ASSERT(eq_object);
+
+	switch (num_entries) {
+	case 256:
+		num_entries_encoding = CEV_EQ_CNT_256;
+		break;
+	case 512:
+		num_entries_encoding = CEV_EQ_CNT_512;
+		break;
+	case 1024:
+		num_entries_encoding = CEV_EQ_CNT_1024;
+		break;
+	case 2048:
+		num_entries_encoding = CEV_EQ_CNT_2048;
+		break;
+	case 4096:
+		num_entries_encoding = CEV_EQ_CNT_4096;
+		break;
+	default:
+		ASSERT(0);
+		return BE_STATUS_INVALID_PARAMETER;
+	}
+
+	switch (eqe_size) {
+	case 4:
+		eqe_size_encoding = CEV_EQ_SIZE_4;
+		break;
+	case 16:
+		eqe_size_encoding = CEV_EQ_SIZE_16;
+		break;
+	default:
+		ASSERT(0);
+		return BE_STATUS_INVALID_PARAMETER;
+	}
+
+	if ((eqe_size == 4 && num_entries < 1024) ||
+	    (eqe_size == 16 && num_entries == 4096)) {
+		TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
+		      eqe_size, num_entries);
+		ASSERT(0);
+		return BE_STATUS_INVALID_PARAMETER;
+	}
+
+	memset(eq_object, 0, sizeof(*eq_object));
+
+	atomic_set(&eq_object->ref_count, 0);
+	eq_object->parent_function = pfob;
+	eq_object->eq_id = 0xFFFFFFFF;
+
+	INIT_LIST_HEAD(&eq_object->cq_list_head);
+
+	length = num_entries * eqe_size;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
+
+	fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
+									length);
+	n = pfob->pci_function_number;
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
+
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
+
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
+			&fwcmd->params.request.context, eqe_size_encoding);
+
+	n = 0; /* Protection Domain is always 0 in  Linux  driver */
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
+
+	/* Let the caller ARM the EQ with the doorbell. */
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
+
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
+					num_entries_encoding);
+
+	n = pfob->pci_function_number * 32;
+	AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
+				&fwcmd->params.request.context, n);
+	if (watermark != -1) {
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
+				&fwcmd->params.request.context, 1);
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
+				&fwcmd->params.request.context, watermark);
+		ASSERT(watermark <= CEV_WMARK_240);
+	} else
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
+					&fwcmd->params.request.context, 0);
+	if (timer_delay != -1) {
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
+					&fwcmd->params.request.context, 1);
+
+		ASSERT(timer_delay <= 250);	/* max value according to EAS */
+		timer_delay = min(timer_delay, (u32)250);
+
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
+				&fwcmd->params.request.context, timer_delay);
+	} else {
+		AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
+				&fwcmd->params.request.context, 0);
+	}
+	/* Create a page list for the FWCMD. */
+	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+			  ARRAY_SIZE(fwcmd->params.request.pages));
+
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+					NULL, NULL, fwcmd, NULL);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "MCC to create EQ failed.");
+		goto Error;
+	}
+	/* Get the EQ id.  The MPU allocates the IDs. */
+	eq_object->eq_id = fwcmd->params.response.eq_id;
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    Dereferences the given object. Once the object's reference count drops to
+    zero, the object is destroyed and all resources that are held by this
+    object are released.  The on-chip context is also destroyed along with
+    the queue ID, and any mappings made into the UT.
+
+    eq_object            - EQ handle returned from eq_object_create.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+	is returned.
+
+    IRQL: IRQL < DISPATCH_LEVEL
+*/
+int be_eq_destroy(struct be_eq_object *eq_object)
+{
+	int status = 0;
+
+	ASSERT(atomic_read(&eq_object->ref_count) == 0);
+	/* no CQs should reference this EQ now */
+	ASSERT(list_empty(&eq_object->cq_list_head));
+
+	/* Send fwcmd to destroy the EQ. */
+	status = be_function_ring_destroy(eq_object->parent_function,
+			     eq_object->eq_id, FWCMD_RING_TYPE_EQ,
+					NULL, NULL, NULL, NULL);
+	ASSERT(status == 0);
+
+	return BE_SUCCESS;
+}
+/*
+ *---------------------------------------------------------------------------
+ * Function: be_eq_modify_delay
+ *   Changes the EQ delay for a group of EQs.
+ * num_eq             - The number of EQs in the eq_array to adjust.
+ * 			This also is the number of delay values in
+ * 			the eq_delay_array.
+ * eq_array           - Array of struct be_eq_object pointers to adjust.
+ * eq_delay_array     - Array of "num_eq" timer delays in units
+ * 			of microseconds. The be_eq_query_delay_range
+ * 			fwcmd returns the resolution and range of
+ *                      legal EQ delays.
+ * cb           -
+ * cb_context   -
+ * q_ctxt             - Optional. Pointer to a previously allocated
+ * 			struct. If the MCC WRB ring is full, this
+ * 			structure is used to queue the operation. It
+ *                      will be posted to the MCC ring when space
+ *                      becomes available. All queued commands will
+ *                      be posted to the ring in the order they are
+ *                      received. It is always valid to pass a pointer to
+ *                      a generic be_generic_q_ctxt. However,
+ *                      the specific context structs
+ *                      are generally smaller than the generic struct.
+ * return pend_status - BE_SUCCESS (0) on success.
+ * 			BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *-------------------------------------------------------------------------
+ */
+int
+be_eq_modify_delay(struct be_function_object *pfob,
+		   u32 num_eq, struct be_eq_object **eq_array,
+		   u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
+		   void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
+{
+	struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	struct be_generic_q_ctxt *gen_ctxt = NULL;
+	u32 i;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			gen_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
+
+	ASSERT(num_eq > 0);
+	ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
+	fwcmd->params.request.num_eq = num_eq;
+	for (i = 0; i < num_eq; i++) {
+		fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
+		fwcmd->params.request.delay[i].delay_in_microseconds =
+		    eq_delay_array[i];
+	}
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, NULL);
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+

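be_eq_create() above accepts only the encodable combinations: 4- or 16-byte EQEs, 256-4096 entries, with 4-byte EQEs requiring at least 1024 entries and 16-byte EQEs at most 2048. A usage sketch, assuming the caller has already described a pinned, physically contiguous ring in eq_ring_rd (the ring setup helper is assumed, not shown):

	/* Usage sketch: a 1024-entry EQ of 4-byte entries, watermark and
	 * timer coalescing both disabled (-1), per the contract above. */
	static int example_create_eq(struct be_function_object *pfob,
				     struct ring_desc *eq_ring_rd,
				     struct be_eq_object *eq)
	{
		int status;

		status = be_eq_create(pfob, eq_ring_rd, 4 /* eqe_size */,
				      1024 /* num_entries */,
				      -1 /* watermark: disabled */,
				      -1 /* timer_delay: disabled */, eq);
		if (status != BE_SUCCESS)
			return status;
		/* ... attach CQs; tear down later with be_eq_destroy(eq). */
		return BE_SUCCESS;
	}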
+ 1273 - 0
drivers/staging/benet/eth.c

@@ -0,0 +1,1273 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/if_ether.h>
+#include "hwlib.h"
+#include "bestatus.h"
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_sq_create_ex
+ *   Creates an ethernet send ring - extended version with
+ *   additional parameters.
+ * pfob -
+ * rd             - ring address
+ * length_in_bytes -
+ * type            - The type of ring to create.
+ * ulp             - The requested ULP number for the ring.
+ * 		     This should be zero based, i.e. 0,1,2. This must
+ * 		     be valid NIC ULP based on the firmware config.
+ *                   All doorbells for this ring must be sent to
+ *                   this ULP. The first network ring allocated for
+ *                   each ULP is higher performance than subsequent rings.
+ * cq_object       - cq object for completions
+ * ex_parameters   - Additional parameters (that may increase in
+ * 		     future revisions). These parameters are only used
+ * 		     for certain ring types -- see
+ *                   struct be_eth_sq_parameters for details.
+ * eth_sq          -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_sq_create_ex(struct be_function_object *pfob, struct ring_desc *rd,
+		u32 length, u32 type, u32 ulp, struct be_cq_object *cq_object,
+		struct be_eth_sq_parameters *ex_parameters,
+		struct be_ethsq_object *eth_sq)
+{
+	struct FWCMD_COMMON_ETH_TX_CREATE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	u32 n;
+	unsigned long irql;
+
+	ASSERT(rd);
+	ASSERT(eth_sq);
+	ASSERT(ex_parameters);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	memset(eth_sq, 0, sizeof(*eth_sq));
+
+	eth_sq->parent_function = pfob;
+	eth_sq->bid = 0xFFFFFFFF;
+	eth_sq->cq_object = cq_object;
+
+	/* Translate hwlib interface to arm interface. */
+	switch (type) {
+	case BE_ETH_TX_RING_TYPE_FORWARDING:
+		type = ETH_TX_RING_TYPE_FORWARDING;
+		break;
+	case BE_ETH_TX_RING_TYPE_STANDARD:
+		type = ETH_TX_RING_TYPE_STANDARD;
+		break;
+	case BE_ETH_TX_RING_TYPE_BOUND:
+		ASSERT(ex_parameters->port < 2);
+		type = ETH_TX_RING_TYPE_BOUND;
+		break;
+	default:
+		TRACE(DL_ERR, "Invalid eth tx ring type:%d", type);
+		return BE_NOT_OK;
+		break;
+	}
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in create eth SQ.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* NIC must be supported by the current config. */
+	ASSERT(pfob->fw_config.nic_ulp_mask);
+
+	/*
+	 * The ulp parameter must select a valid NIC ULP
+	 * for the current config.
+	 */
+	ASSERT((1 << ulp) & pfob->fw_config.nic_ulp_mask);
+
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_TX_CREATE);
+	fwcmd->header.request.port_number = ex_parameters->port;
+
+	AMAP_SET_BITS_PTR(ETX_CONTEXT, pd_id,
+				&fwcmd->params.request.context, 0);
+
+	n = be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP));
+	AMAP_SET_BITS_PTR(ETX_CONTEXT, tx_ring_size,
+					&fwcmd->params.request.context, n);
+
+	AMAP_SET_BITS_PTR(ETX_CONTEXT, cq_id_send,
+			&fwcmd->params.request.context, cq_object->cq_id);
+
+	n = pfob->pci_function_number;
+	AMAP_SET_BITS_PTR(ETX_CONTEXT, func, &fwcmd->params.request.context, n);
+
+	fwcmd->params.request.type = type;
+	fwcmd->params.request.ulp_num  = (1 << ulp);
+	fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+	ASSERT(PAGES_SPANNED(rd->va, rd->length) >=
+				fwcmd->params.request.num_pages);
+
+	/* Create a page list for the FWCMD. */
+	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+			  ARRAY_SIZE(fwcmd->params.request.pages));
+
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+					NULL, NULL, fwcmd, NULL);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "MCC to create etx queue failed.");
+		goto Error;
+	}
+	/* save the butler ID */
+	eth_sq->bid = fwcmd->params.response.cid;
+
+	/* add a reference to the corresponding CQ */
+	atomic_inc(&cq_object->ref_count);
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+
+/*
+    This routine destroys an ethernet send queue
+
+    EthSq - EthSq Handle returned from EthSqCreate
+
+    This function always return BE_SUCCESS.
+
+    This function frees memory allocated by EthSqCreate for the EthSq Object.
+
+*/
+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq)
+{
+	int status = 0;
+
+	/* Send fwcmd to destroy the queue. */
+	status = be_function_ring_destroy(eth_sq->parent_function, eth_sq->bid,
+		     FWCMD_RING_TYPE_ETH_TX, NULL, NULL, NULL, NULL);
+	ASSERT(status == 0);
+
+	/* Dereference any associated CQs. */
+	atomic_dec(&eth_sq->cq_object->ref_count);
+	return status;
+}
+/*
+    This routine attempts to set the transmit flow control parameters.
+
+    FunctionObject      - Handle to a function object
+
+    txfc_enable         - transmit flow control enable - true for
+			  enable, false for disable
+
+    rxfc_enable         - receive flow control enable - true for
+				enable, false for disable
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_set_flow_control(struct be_function_object *pfob,
+			bool txfc_enable, bool rxfc_enable)
+{
+	struct FWCMD_COMMON_SET_FLOW_CONTROL *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FLOW_CONTROL);
+
+	fwcmd->params.request.rx_flow_control = rxfc_enable;
+	fwcmd->params.request.tx_flow_control = txfc_enable;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+					NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "set flow control fwcmd failed.");
+		goto error;
+	}
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine attempts to get the transmit flow control parameters.
+
+    pfob      - Handle to a function object
+
+    txfc_enable         - transmit flow control enable - true for
+			enable, false for disable
+
+    rxfc_enable         - receive flow control enable - true for enable,
+			false for disable
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error code
+			is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_get_flow_control(struct be_function_object *pfob,
+			bool *txfc_enable, bool *rxfc_enable)
+{
+	struct FWCMD_COMMON_GET_FLOW_CONTROL *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FLOW_CONTROL);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+						NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "get flow control fwcmd failed.");
+		goto error;
+	}
+
+	*txfc_enable = fwcmd->params.response.tx_flow_control;
+	*rxfc_enable = fwcmd->params.response.rx_flow_control;
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_set_qos
+ *   This function sets the ethernet transmit Quality of Service (QoS)
+ *   characteristics of BladeEngine for the domain. All ethernet
+ *   transmit rings of the domain will evenly share the bandwidth.
+ *   The exception to sharing is the host primary (super) ethernet
+ *   transmit ring as well as the host ethernet forwarding ring
+ *   for missed offload data.
+ * pfob -
+ * max_bps         - the maximum bits per second in units of
+ * 			10 Mbps (valid 0-100)
+ * max_pps         - the maximum packets per second in units
+ * 			of 1 Kpps (0 indicates no limit)
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps)
+{
+	struct FWCMD_COMMON_SET_QOS *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_QOS);
+
+	/* Set fields in fwcmd */
+	fwcmd->params.request.max_bits_per_second_NIC = max_bps;
+	fwcmd->params.request.max_packets_per_second_NIC = max_pps;
+	fwcmd->params.request.valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+					NULL, NULL, fwcmd, NULL);
+
+	if (status != 0)
+		TRACE(DL_ERR, "network set qos fwcmd failed.");
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_get_qos
+ *   This function retrieves the ethernet transmit Quality of Service (QoS)
+ *   characteristics for the domain.
+ * max_bps         - the maximum bits per second in units of
+ * 			10 Mbps (valid 0-100)
+ * max_pps         - the maximum packets per second in units of
+ * 			1 Kpps (0 indicates no limit)
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps)
+{
+	struct FWCMD_COMMON_GET_QOS *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_QOS);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+					NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "network get qos fwcmd failed.");
+		goto error;
+	}
+
+	*max_bps = fwcmd->params.response.max_bits_per_second_NIC;
+	*max_pps = fwcmd->params.response.max_packets_per_second_NIC;
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_set_frame_size
+ *   This function sets the ethernet maximum frame size. The previous
+ *   values are returned.
+ * pfob -
+ * tx_frame_size   - maximum transmit frame size in bytes
+ * rx_frame_size   - maximum receive frame size in bytes
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_set_frame_size(struct be_function_object *pfob,
+		      u32 *tx_frame_size, u32 *rx_frame_size)
+{
+	struct FWCMD_COMMON_SET_FRAME_SIZE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FRAME_SIZE);
+	fwcmd->params.request.max_tx_frame_size = *tx_frame_size;
+	fwcmd->params.request.max_rx_frame_size = *rx_frame_size;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+						NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "network set frame size fwcmd failed.");
+		goto error;
+	}
+
+	*tx_frame_size = fwcmd->params.response.chip_max_tx_frame_size;
+	*rx_frame_size = fwcmd->params.response.chip_max_rx_frame_size;
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+
+/*
+    This routine creates a Ethernet receive ring.
+
+    pfob      - handle to a function object
+    rq_base_va            - base VA for the default receive ring. This must be
+			exactly 8K in length and contiguous physical memory.
+    cq_object            - handle to a previously created CQ to be associated
+			with the RQ.
+    pp_eth_rq             - pointer to an opaque handle where an eth
+			receive object is returned.
+    Returns BE_SUCCESS if successful, otherwise a useful
+    int error code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+    this function allocates a struct be_ethrq_object *object.
+    there must be no more than 1 of these per function object, unless the
+    function object supports RSS (is networking and on the host).
+    the rq_base_va must point to a buffer of exactly 8K.
+    the erx::host_cqid (or host_stor_cqid) register and erx::ring_page registers
+    will be updated as appropriate on return
+*/
+int
+be_eth_rq_create(struct be_function_object *pfob,
+			struct ring_desc *rd, struct be_cq_object *cq_object,
+			struct be_cq_object *bcmc_cq_object,
+			struct be_ethrq_object *eth_rq)
+{
+	int status = 0;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	struct FWCMD_COMMON_ETH_RX_CREATE *fwcmd = NULL;
+	unsigned long irql;
+
+	/* MPU will set the  */
+	ASSERT(rd);
+	ASSERT(eth_rq);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	eth_rq->parent_function = pfob;
+	eth_rq->cq_object = cq_object;
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_RX_CREATE);
+
+	fwcmd->params.request.num_pages = 2;	/* required length */
+	fwcmd->params.request.cq_id = cq_object->cq_id;
+
+	if (bcmc_cq_object)
+		fwcmd->params.request.bcmc_cq_id = bcmc_cq_object->cq_id;
+	else
+		fwcmd->params.request.bcmc_cq_id = 0xFFFF;
+
+	/* Create a page list for the FWCMD. */
+	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+			  ARRAY_SIZE(fwcmd->params.request.pages));
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+						NULL, NULL, fwcmd, NULL);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "fwcmd to map eth rxq frags failed.");
+		goto Error;
+	}
+	/* Save the ring ID for cleanup. */
+	eth_rq->rid = fwcmd->params.response.id;
+
+	atomic_inc(&cq_object->ref_count);
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine destroys an Ethernet receive queue
+
+    eth_rq - ethernet receive queue handle returned from eth_rq_create
+
+    Returns BE_SUCCESS on success and an appropriate int on failure.
+
+    This function frees resources allocated by EthRqCreate.
+    The erx::host_cqid (or host_stor_cqid) register and erx::ring_page
+    registers will be updated as appropriate on return
+    IRQL: < DISPATCH_LEVEL
+*/
+
+static void be_eth_rq_destroy_internal_cb(void *context, int status,
+					 struct MCC_WRB_AMAP *wrb)
+{
+	struct be_ethrq_object *eth_rq = (struct be_ethrq_object *) context;
+
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "Destroy eth rq failed in internal callback.\n");
+	} else {
+		/* Dereference any CQs associated with this queue. */
+		atomic_dec(&eth_rq->cq_object->ref_count);
+	}
+
+	return;
+}
+
+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq)
+{
+	int status = BE_SUCCESS;
+
+	/* Send fwcmd to destroy the RQ. */
+	status = be_function_ring_destroy(eth_rq->parent_function,
+			eth_rq->rid, FWCMD_RING_TYPE_ETH_RX, NULL, NULL,
+			be_eth_rq_destroy_internal_cb, eth_rq);
+
+	return status;
+}
+
+/*
+ *---------------------------------------------------------------------------
+ * Function: be_eth_rq_destroy_options
+ *   Destroys an ethernet receive ring with finer granularity options
+ *   than the standard be_eth_rq_destroy() API function.
+ * eth_rq           -
+ * flush            - Set to 1 to flush the ring, set to 0 to bypass the flush
+ * cb               - Callback function on completion
+ * cb_context       - Callback context
+ * return status    - BE_SUCCESS (0) on success. Negative error code on failure.
+ *----------------------------------------------------------------------------
+ */
+int
+be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
+		mcc_wrb_cqe_callback cb, void *cb_context)
+{
+	struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = BE_SUCCESS;
+	struct be_function_object *pfob = NULL;
+	unsigned long irql;
+
+	pfob = eth_rq->parent_function;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	TRACE(DL_INFO, "Destroy eth_rq ring id:%d, flush:%d", eth_rq->rid,
+	      flush);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in destroy eth_rq ring.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
+
+	fwcmd->params.request.id = eth_rq->rid;
+	fwcmd->params.request.ring_type = FWCMD_RING_TYPE_ETH_RX;
+	fwcmd->params.request.bypass_flush = ((0 == flush) ? 1 : 0);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
+			be_eth_rq_destroy_internal_cb, eth_rq, fwcmd, NULL);
+
+	if (status != BE_SUCCESS && status != BE_PENDING) {
+		TRACE(DL_ERR, "eth_rq ring destroy failed. id:%d, flush:%d",
+		      eth_rq->rid, flush);
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine queries the frag size for erx.
+
+    pfob      - handle to a function object
+
+    frag_size_bytes       - erx frag size in bytes that is/was set.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+*/
+int
+be_eth_rq_get_frag_size(struct be_function_object *pfob, u32 *frag_size_bytes)
+{
+	struct FWCMD_ETH_GET_RX_FRAG_SIZE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	ASSERT(frag_size_bytes);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		return BE_STATUS_NO_MCC_WRB;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_RX_FRAG_SIZE);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+				NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "get frag size fwcmd failed.");
+		goto error;
+	}
+
+	*frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine attempts to set the frag size for erx.  If the frag size is
+    already set, the attempt fails and the current frag size is returned.
+
+    pfob      - Handle to a function object
+
+    frag_size       - Erx frag size in bytes that is/was set.
+
+    current_frag_size_bytes    - Pointer to location where the current frag
+				 size is to be returned
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_rq_set_frag_size(struct be_function_object *pfob,
+			u32 frag_size, u32 *frag_size_bytes)
+{
+	struct FWCMD_ETH_SET_RX_FRAG_SIZE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	ASSERT(frag_size_bytes);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_SET_RX_FRAG_SIZE);
+
+	ASSERT(frag_size >= 128 && frag_size <= 16 * 1024);
+
+	/* This is the log2 of the fragsize.  This is not the exact
+	 * ERX encoding. */
+	fwcmd->params.request.new_fragsize_log2 = __ilog2_u32(frag_size);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+				NULL, NULL, fwcmd, NULL);
+
+	if (status != 0) {
+		TRACE(DL_ERR, "set frag size fwcmd failed.");
+		goto error;
+	}
+
+	*frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+
+/*
+    This routine gets or sets a mac address for a domain
+    given the port and mac.
+
+    FunctionObject  - Function object handle.
+    port1           - Set to TRUE if this function will set/get the Port 1
+			address.  Only the host may set this to TRUE.
+    mac1            - Set to TRUE if this function will set/get the
+			MAC 1 address.  Only the host may set this to TRUE.
+    write           - Set to TRUE if this function should write the mac address.
+    mac_address      - Buffer of the mac address to read or write.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
+		bool port1,	/* VM must always set to false */
+		bool mac1,	/* VM must always set to false */
+		bool mgmt, bool write,
+		bool permanent, u8 *mac_address,
+		mcc_wrb_cqe_callback cb,	/* optional */
+		void *cb_context)	/* optional */
+{
+	int status = BE_SUCCESS;
+	union {
+		struct FWCMD_COMMON_NTWK_MAC_QUERY *query;
+		struct FWCMD_COMMON_NTWK_MAC_SET *set;
+	} fwcmd = {NULL};
+	struct MCC_WRB_AMAP *wrb = NULL;
+	u32 type = 0;
+	unsigned long irql;
+	struct be_mcc_wrb_response_copy rc;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	ASSERT(mac_address);
+
+	ASSERT(port1 == false);
+	ASSERT(mac1 == false);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+
+	if (mgmt) {
+		type = MAC_ADDRESS_TYPE_MANAGEMENT;
+	} else {
+		if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
+			type = MAC_ADDRESS_TYPE_NETWORK;
+		else
+			type = MAC_ADDRESS_TYPE_STORAGE;
+	}
+
+	if (write) {
+		/* Prepares an embedded fwcmd, including
+		 * request/response sizes.
+		 */
+	n = 0; /* Protection Domain is always 0 in the Linux driver */
+					       wrb, COMMON_NTWK_MAC_SET);
+
+		fwcmd.set->params.request.invalidate = 0;
+		fwcmd.set->params.request.mac1 = (mac1 ? 1 : 0);
+		fwcmd.set->params.request.port = (port1 ? 1 : 0);
+		fwcmd.set->params.request.type = type;
+
+		/* Copy the mac address to set. */
+		fwcmd.set->params.request.mac.SizeOfStructure =
+			    sizeof(fwcmd.set->params.request.mac);
+		memcpy(fwcmd.set->params.request.mac.MACAddress,
+			mac_address, ETH_ALEN);
+
+		/* Post the f/w command */
+		status = be_function_post_mcc_wrb(pfob, wrb, NULL,
+				cb, cb_context, NULL, NULL, fwcmd.set, NULL);
+
+	} else {
+
+		/*
+		 * Prepares an embedded fwcmd, including
+		 * request/response sizes.
+		 */
+		fwcmd.query = BE_PREPARE_EMBEDDED_FWCMD(pfob,
+					       wrb, COMMON_NTWK_MAC_QUERY);
+
+		fwcmd.query->params.request.mac1 = (mac1 ? 1 : 0);
+		fwcmd.query->params.request.port = (port1 ? 1 : 0);
+		fwcmd.query->params.request.type = type;
+		fwcmd.query->params.request.permanent = permanent;
+
+		rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_MAC_QUERY,
+						params.response.mac.MACAddress);
+		rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_MAC_QUERY,
+						params.response.mac.MACAddress);
+		rc.va = mac_address;
+		/* Post the f/w command (with a copy for the response) */
+		status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
+				cb_context, NULL, NULL, fwcmd.query, &rc);
+	}
+
+	if (status < 0) {
+		TRACE(DL_ERR, "mac set/query failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine configures the multicast address filter.
+
+    pfob  - Function object handle.
+    mac_table     - Array of "num" multicast MAC addresses to set.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+
+int be_rxf_multicast_config(struct be_function_object *pfob,
+		bool promiscuous, u32 num, u8 *mac_table,
+		mcc_wrb_cqe_callback cb,	/* optional */
+		void *cb_context,
+		struct be_multicast_q_ctxt *q_ctxt)
+{
+	int status = BE_SUCCESS;
+	struct FWCMD_COMMON_NTWK_MULTICAST_SET *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+
+	ASSERT(num <= ARRAY_SIZE(fwcmd->params.request.mac));
+
+	if (num > ARRAY_SIZE(fwcmd->params.request.mac)) {
+		TRACE(DL_ERR, "Too many multicast addresses. BE supports %d.",
+		      (int) ARRAY_SIZE(fwcmd->params.request.mac));
+		return BE_NOT_OK;
+	}
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_MULTICAST_SET);
+
+	fwcmd->params.request.promiscuous = promiscuous;
+	if (!promiscuous) {
+		fwcmd->params.request.num_mac = num;
+		if (num > 0) {
+			ASSERT(mac_table);
+			memcpy(fwcmd->params.request.mac,
+						mac_table, ETH_ALEN * num);
+		}
+	}
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, NULL);
+	if (status < 0) {
+		TRACE(DL_ERR, "multicast fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This routine configures the VLAN tag filter in the rxf table.
+
+    pfob            - Function object handle.
+    vlan_tag_array  - Array of "num" VLAN tags to set.
+    promiscuous     - Set to TRUE to disable VLAN filtering.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+int be_rxf_vlan_config(struct be_function_object *pfob,
+		bool promiscuous, u32 num, u16 *vlan_tag_array,
+		mcc_wrb_cqe_callback cb,	/* optional */
+		void *cb_context,
+		struct be_vlan_q_ctxt *q_ctxt)	/* optional */
+{
+	int status = BE_SUCCESS;
+	struct FWCMD_COMMON_NTWK_VLAN_CONFIG *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+
+	if (num > ARRAY_SIZE(fwcmd->params.request.vlan_tag)) {
+		TRACE(DL_ERR, "Too many VLAN tags.");
+		return BE_NOT_OK;
+	}
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_VLAN_CONFIG);
+
+	fwcmd->params.request.promiscuous = promiscuous;
+	if (!promiscuous) {
+		fwcmd->params.request.num_vlan = num;
+
+		if (num > 0) {
+			ASSERT(vlan_tag_array);
+			memcpy(fwcmd->params.request.vlan_tag, vlan_tag_array,
+				  num * sizeof(vlan_tag_array[0]));
+		}
+	}
+
+	/* Post the command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, NULL);
+	if (status < 0) {
+		TRACE(DL_ERR, "vlan fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+
+int be_rxf_link_status(struct be_function_object *pfob,
+		struct BE_LINK_STATUS *link_status,
+		mcc_wrb_cqe_callback cb,
+		void *cb_context,
+		struct be_link_status_q_ctxt *q_ctxt)
+{
+	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+	struct be_mcc_wrb_response_copy rc;
+
+	ASSERT(link_status);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb,
+					       COMMON_NTWK_LINK_STATUS_QUERY);
+
+	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
+					params.response);
+	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
+					params.response);
+	rc.va = link_status;
+	/* Post or queue the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, &rc);
+
+	if (status < 0) {
+		TRACE(DL_ERR, "link status fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+int
+be_rxf_query_eth_statistics(struct be_function_object *pfob,
+		    struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
+		    u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
+		    void *cb_context,
+		    struct be_nonembedded_q_ctxt *q_ctxt)
+{
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+
+	ASSERT(va_for_fwcmd);
+	ASSERT(pa_for_fwcmd);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+
+	TRACE(DL_INFO, "Query eth stats. fwcmd va:%p pa:0x%08x_%08x",
+	      va_for_fwcmd, upper_32_bits(pa_for_fwcmd), (u32)pa_for_fwcmd);
+
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	va_for_fwcmd = BE_PREPARE_NONEMBEDDED_FWCMD(pfob, wrb,
+			  va_for_fwcmd, pa_for_fwcmd, ETH_GET_STATISTICS);
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+		cb, cb_context, NULL, NULL, va_for_fwcmd, NULL);
+	if (status < 0) {
+		TRACE(DL_ERR, "eth stats fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+int
+be_rxf_promiscuous(struct be_function_object *pfob,
+		   bool enable_port0, bool enable_port1,
+		   mcc_wrb_cqe_callback cb, void *cb_context,
+		   struct be_promiscuous_q_ctxt *q_ctxt)
+{
+	struct FWCMD_ETH_PROMISCUOUS *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_PROMISCUOUS);
+
+	fwcmd->params.request.port0_promiscuous = enable_port0;
+	fwcmd->params.request.port1_promiscuous = enable_port1;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, NULL);
+
+	if (status < 0) {
+		TRACE(DL_ERR, "promiscuous fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+
+/*
+ *-------------------------------------------------------------------------
+ * Function: be_rxf_filter_config
+ *   Configures BladeEngine ethernet receive filter settings.
+ * pfob    -
+ * settings           - Pointer to the requested filter settings.
+ * 			The response from BladeEngine will be placed back
+ * 			in this structure.
+ * cb                 - optional
+ * cb_context         - optional
+ * q_ctxt             - Optional. Pointer to a previously allocated struct.
+ * 			If the MCC WRB ring is full, this structure is
+ * 			used to queue the operation. It will be posted
+ * 			to the MCC ring when space becomes available. All
+ *                      queued commands will be posted to the ring in
+ *                      the order they are received. It is always valid
+ *                      to pass a pointer to a generic
+ *                      be_generic_q_ctxt. However, the specific
+ *                      context structs are generally smaller than
+ *                      the generic struct.
+ * return pend_status - BE_SUCCESS (0) on success.
+ * 			BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *---------------------------------------------------------------------------
+ */
+int
+be_rxf_filter_config(struct be_function_object *pfob,
+		     struct NTWK_RX_FILTER_SETTINGS *settings,
+		     mcc_wrb_cqe_callback cb, void *cb_context,
+		     struct be_rxf_filter_q_ctxt *q_ctxt)
+{
+	struct FWCMD_COMMON_NTWK_RX_FILTER *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	struct be_generic_q_ctxt *generic_ctxt = NULL;
+	unsigned long irql;
+	struct be_mcc_wrb_response_copy rc;
+
+	ASSERT(settings);
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+
+	if (!wrb) {
+		if (q_ctxt && cb) {
+			wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+			generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+			generic_ctxt->context.bytes = sizeof(*q_ctxt);
+		} else {
+			status = BE_STATUS_NO_MCC_WRB;
+			goto Error;
+		}
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_RX_FILTER);
+	memcpy(&fwcmd->params.request, settings, sizeof(*settings));
+
+	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_RX_FILTER,
+					params.response);
+	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_RX_FILTER,
+					params.response);
+	rc.va = settings;
+	/* Post or queue the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+			cb, cb_context, NULL, NULL, fwcmd, &rc);
+
+	if (status < 0) {
+		TRACE(DL_ERR, "RXF/ERX filter config fwcmd failed.");
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}

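Several of the entry points in this file (be_rxf_multicast_config, be_rxf_vlan_config, be_rxf_link_status, be_rxf_filter_config) take an optional q_ctxt so a full MCC WRB ring queues the command instead of failing with BE_STATUS_NO_MCC_WRB. A usage sketch for the VLAN path; the callback body and tag values are this example's assumptions, and per the queuing contract the q_ctxt memory must stay valid until the callback runs:

	/* Completion callback; signature matches mcc_wrb_cqe_callback. */
	static void vlan_done(void *ctx, int status, struct MCC_WRB_AMAP *wrb)
	{
		if (status != BE_SUCCESS)
			TRACE(DL_ERR, "vlan config failed. status:%d", status);
	}

	static int push_vlans(struct be_function_object *pfob,
			      struct be_vlan_q_ctxt *qctxt)
	{
		u16 tags[2] = { 100, 200 };	/* example VLAN IDs */

		/* Returns BE_PENDING (positive) if the command was queued. */
		return be_rxf_vlan_config(pfob, false, ARRAY_SIZE(tags), tags,
					  vlan_done, NULL, qctxt);
	}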
+ 55 - 0
drivers/staging/benet/etx_context.h

@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __etx_context_amap_h__
+#define __etx_context_amap_h__
+
+/* ETX ring  context structure. */
+struct BE_ETX_CONTEXT_AMAP {
+	u8 tx_cidx[11];	/* DWORD 0 */
+	u8 rsvd0[5];	/* DWORD 0 */
+	u8 rsvd1[16];	/* DWORD 0 */
+	u8 tx_pidx[11];	/* DWORD 1 */
+	u8 rsvd2;		/* DWORD 1 */
+	u8 tx_ring_size[4];	/* DWORD 1 */
+	u8 pd_id[5];	/* DWORD 1 */
+	u8 pd_id_not_valid;	/* DWORD 1 */
+	u8 cq_id_send[10];	/* DWORD 1 */
+	u8 rsvd3[32];	/* DWORD 2 */
+	u8 rsvd4[32];	/* DWORD 3 */
+	u8 cur_bytes[32];	/* DWORD 4 */
+	u8 max_bytes[32];	/* DWORD 5 */
+	u8 time_stamp[32];	/* DWORD 6 */
+	u8 rsvd5[11];	/* DWORD 7 */
+	u8 func;		/* DWORD 7 */
+	u8 rsvd6[20];	/* DWORD 7 */
+	u8 cur_txd_count[32];	/* DWORD 8 */
+	u8 max_txd_count[32];	/* DWORD 9 */
+	u8 rsvd7[32];	/* DWORD 10 */
+	u8 rsvd8[32];	/* DWORD 11 */
+	u8 rsvd9[32];	/* DWORD 12 */
+	u8 rsvd10[32];	/* DWORD 13 */
+	u8 rsvd11[32];	/* DWORD 14 */
+	u8 rsvd12[32];	/* DWORD 15 */
+} __packed;
+struct ETX_CONTEXT_AMAP {
+	u32 dw[16];
+};
+
+#endif /* __etx_context_amap_h__ */

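The 4-bit tx_ring_size field above carries an encoded ring length; be_eth_sq_create_ex() in eth.c derives it with be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP)). A plausible reading is a log2 entry-count encoding, but the authoritative definition is be_ring_length_to_encoding() elsewhere in this series, so treat this helper as an assumption:

	/* Assumed encoding: log2 of the WRB count fits the 4-bit field. */
	static u32 etx_ring_size_encoding(u32 length_in_bytes, u32 wrb_size)
	{
		u32 entries = length_in_bytes / wrb_size;

		return __ilog2_u32(entries);	/* e.g. 256 WRBs -> 8 */
	}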
+ 565 - 0
drivers/staging/benet/funcobj.c

@@ -0,0 +1,565 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+
+
+int
+be_function_internal_query_firmware_config(struct be_function_object *pfob,
+				   struct BE_FIRMWARE_CONFIG *config)
+{
+	struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+	struct be_mcc_wrb_response_copy rc;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
+
+	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
+					params.response);
+	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
+					params.response);
+	rc.va = config;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
+					NULL, NULL, NULL, fwcmd, &rc);
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+/*
+    This allocates and initializes a function object based on the information
+    provided by upper layer drivers.
+
+    Returns BE_SUCCESS on success and an appropriate int on failure.
+
+    A function object represents a single BladeEngine (logical) PCI function.
+    That is, a function object represents either the networking side
+    of BladeEngine or the iSCSI side of BladeEngine.
+
+    This routine will also detect and create an appropriate PD object for the
+    PCI function as needed.
+*/
+int
+be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
+		u8 __iomem *pci_va, u32 function_type,
+		struct ring_desc *mailbox, struct be_function_object *pfob)
+{
+	int status;
+
+	ASSERT(pfob);	/* not a magic assert */
+	ASSERT(function_type <= 2);
+
+	TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
+	      (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
+	       (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
+		"Arm")), pfob);
+
+	memset(pfob, 0, sizeof(*pfob));
+
+	pfob->type = function_type;
+	pfob->csr_va = csr_va;
+	pfob->db_va = db_va;
+	pfob->pci_va = pci_va;
+
+	spin_lock_init(&pfob->cq_lock);
+	spin_lock_init(&pfob->post_lock);
+	spin_lock_init(&pfob->mcc_context_lock);
+
+
+	pfob->pci_function_number = 1;
+
+
+	pfob->emulate = false;
+	TRACE(DL_NOTE, "Non-emulation mode");
+	status = be_drive_POST(pfob);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "BladeEngine POST failed.");
+		goto error;
+	}
+
+	/* Initialize the mailbox */
+	status = be_mpu_init_mailbox(pfob, mailbox);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "Failed to initialize mailbox.");
+		goto error;
+	}
+	/*
+	 * Cache the firmware config for ASSERTs in hwlib and later
+	 * driver queries.
+	 */
+	status = be_function_internal_query_firmware_config(pfob,
+					       &pfob->fw_config);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "Failed to query firmware config.");
+		goto error;
+	}
+
+error:
+	if (status != BE_SUCCESS) {
+		/* No cleanup necessary */
+		TRACE(DL_ERR, "Failed to create function.");
+		memset(pfob, 0, sizeof(*pfob));
+	}
+	return status;
+}
+
+/*
+    This routine drops the reference count on a given function object. Once
+    the reference count falls to zero, the function object is destroyed and all
+    resources held are freed.
+
+    FunctionObject      - The function object to drop the reference to.
+*/
+int be_function_object_destroy(struct be_function_object *pfob)
+{
+	TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
+	      pfob);
+
+
+	ASSERT(pfob->mcc == NULL);
+
+	return BE_SUCCESS;
+}
+
+int be_function_cleanup(struct be_function_object *pfob)
+{
+	int status = 0;
+	u32 isr;
+	u32 host_intr;
+	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+
+
+	if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
+		status = be_rxf_multicast_config(pfob, false, 0,
+						NULL, NULL, NULL, NULL);
+		ASSERT(status == BE_SUCCESS);
+	}
+	/* VLAN */
+	status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
+	ASSERT(status == BE_SUCCESS);
+	/*
+	 * MCC Queue -- Switches to mailbox mode.  May want to destroy
+	 * all but the MCC CQ before this call if polling CQ is much better
+	 * performance than polling mailbox register.
+	 */
+	if (pfob->mcc)
+		status = be_mcc_ring_destroy(pfob->mcc);
+	/*
+	 * If interrupts are disabled, clear any CEV interrupt assertions that
+	 * fired after we stopped processing EQs.
+	 */
+	ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
+	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+							hostintr, ctrl.dw);
+	if (!host_intr) {
+		if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
+			isr = CSR_READ(pfob, cev.isr1);
+		else
+			isr = CSR_READ(pfob, cev.isr0);
+	} else {
+		/* This should never happen... */
+		TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
+	}
+	/* Function object destroy */
+	status = be_function_object_destroy(pfob);
+	ASSERT(status == BE_SUCCESS);
+
+	return status;
+}
+
+
+void *
+be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
+	struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
+	u32 response_length, u32 opcode, u32 subsystem)
+{
+	struct FWCMD_REQUEST_HEADER *header = NULL;
+	u32 n;
+
+	ASSERT(wrb);
+
+	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
+	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
+	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
+	header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
+
+	header->timeout = 0;
+	header->domain = 0;
+	header->request_length = max(request_length, response_length);
+	header->opcode = opcode;
+	header->subsystem = subsystem;
+
+	return header;
+}
+
+void *
+be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
+	struct MCC_WRB_AMAP *wrb,
+	void *fwcmd_va, u64 fwcmd_pa,
+	u32 payld_len,
+	u32 request_length,
+	u32 response_length,
+	u32 opcode, u32 subsystem)
+{
+	struct FWCMD_REQUEST_HEADER *header = NULL;
+	u32 n;
+	struct MCC_WRB_PAYLOAD_AMAP *plp;
+
+	ASSERT(wrb);
+	ASSERT(fwcmd_va);
+
+	header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
+
+	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
+	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
+
+	/*
+	 * Assume one fragment. The caller may override the SGL by
+	 * rewriting the 0th length and adding more entries.  They
+	 * will also need to update the sge_count.
+	 */
+	AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
+
+	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
+	plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
+	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
+	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
+	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
+					upper_32_bits(fwcmd_pa));
+
+	header->timeout = 0;
+	header->domain = 0;
+	header->request_length = max(request_length, response_length);
+	header->opcode = opcode;
+	header->subsystem = subsystem;
+
+	return header;
+}
+
+struct MCC_WRB_AMAP *
+be_function_peek_mcc_wrb(struct be_function_object *pfob)
+{
+	struct MCC_WRB_AMAP *wrb = NULL;
+	u32 offset;
+
+	if (pfob->mcc)
+		wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
+	else {
+		offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
+		wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
+				offset);
+	}
+
+	if (wrb)
+		memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
+
+	return wrb;
+}
+
+#if defined(BE_DEBUG)
+void be_function_debug_print_wrb(struct be_function_object *pfob,
+		struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
+		struct be_mcc_wrb_context *wrb_context)
+{
+
+	struct FWCMD_REQUEST_HEADER *header = NULL;
+	u8 embedded;
+	u32 n;
+
+	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
+
+	if (embedded) {
+		n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
+		header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
+	} else {
+		header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
+	}
+
+	/* Save the completed count before posting for a debug assert. */
+
+	if (header) {
+		wrb_context->opcode = header->opcode;
+		wrb_context->subsystem = header->subsystem;
+
+	} else {
+		wrb_context->opcode = 0;
+		wrb_context->subsystem = 0;
+	}
+}
+#else
+#define be_function_debug_print_wrb(a_, b_, c_, d_)
+#endif
+
+int
+be_function_post_mcc_wrb(struct be_function_object *pfob,
+		struct MCC_WRB_AMAP *wrb,
+		struct be_generic_q_ctxt *q_ctxt,
+		mcc_wrb_cqe_callback cb, void *cb_context,
+		mcc_wrb_cqe_callback internal_cb,
+		void *internal_cb_context, void *optional_fwcmd_va,
+		struct be_mcc_wrb_response_copy *rc)
+{
+	int status;
+	struct be_mcc_wrb_context *wrb_context = NULL;
+	u64 *p;
+
+	if (q_ctxt) {
+		/* Initialize context.         */
+		q_ctxt->context.internal_cb = internal_cb;
+		q_ctxt->context.internal_cb_context = internal_cb_context;
+		q_ctxt->context.cb = cb;
+		q_ctxt->context.cb_context = cb_context;
+		if (rc) {
+			q_ctxt->context.copy.length = rc->length;
+			q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
+			q_ctxt->context.copy.va = rc->va;
+		} else
+			q_ctxt->context.copy.length = 0;
+
+		q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
+
+		/* Queue this request */
+		status = be_function_queue_mcc_wrb(pfob, q_ctxt);
+
+		goto Error;
+	}
+	/*
+	 * Allocate a WRB context struct to hold the callback pointers,
+	 * status, etc.  This is required if commands complete out of order.
+	 */
+	wrb_context = _be_mcc_allocate_wrb_context(pfob);
+	if (!wrb_context) {
+		TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
+		status = BE_STATUS_SYSTEM_RESOURCES;
+		goto Error;
+	}
+	/* Initialize context. */
+	memset(wrb_context, 0, sizeof(*wrb_context));
+	wrb_context->internal_cb = internal_cb;
+	wrb_context->internal_cb_context = internal_cb_context;
+	wrb_context->cb = cb;
+	wrb_context->cb_context = cb_context;
+	if (rc) {
+		wrb_context->copy.length = rc->length;
+		wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
+		wrb_context->copy.va = rc->va;
+	} else
+		wrb_context->copy.length = 0;
+	wrb_context->wrb = wrb;
+
+	/*
+	 * Copy the context pointer into the WRB opaque tag field.
+	 * Verify assumption of 64-bit tag with a compile time assert.
+	 */
+	p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
+	*p = (u64)(size_t)wrb_context;
+
+	/* Print info about this FWCMD for debug builds. */
+	be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
+
+	/*
+	 * issue the WRB to the MPU as appropriate
+	 */
+	if (pfob->mcc) {
+		/*
+		 * we're in WRB mode, pass to the mcc layer
+		 */
+		status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
+	} else {
+		/*
+		 * we're in mailbox mode
+		 */
+		status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
+
+		/* mailbox mode always completes synchronously */
+		ASSERT(status != BE_STATUS_PENDING);
+	}
+
+Error:
+
+	return status;
+}
+
+int
+be_function_ring_destroy(struct be_function_object *pfob,
+		u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
+		void *cb_context, mcc_wrb_cqe_callback internal_cb,
+		void *internal_cb_context)
+{
+
+	struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	int status = 0;
+	unsigned long irql;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
+
+	fwcmd->params.request.id = id;
+	fwcmd->params.request.ring_type = ring_type;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
+				internal_cb, internal_cb_context, fwcmd, NULL);
+	if (status != BE_SUCCESS && status != BE_PENDING) {
+		TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
+			id, ring_type);
+		goto Error;
+	}
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+void
+be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
+{
+	u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
+	u32 i = 0;
+	u64 pa = rd->pa;
+	__le64 lepa;
+
+	ASSERT(pa_list);
+	ASSERT(pa);
+
+	for (i = 0; i < min(num_pages, max_num); i++) {
+		lepa = cpu_to_le64(pa);
+		pa_list[i].lo = (u32)lepa;
+		pa_list[i].hi = upper_32_bits(lepa);
+		pa += PAGE_SIZE;
+	}
+}
+
+
+
+/*-----------------------------------------------------------------------------
+ * Function: be_function_get_fw_version
+ *   Retrieves the firmware version on the adapter. If the callback is
+ *   NULL this call executes synchronously. If the callback is not NULL,
+ *   the returned status will be BE_PENDING if the command was issued
+ *   successfully.
+ * pfob    -
+ * fwv         - Pointer to response buffer if callback is NULL.
+ * cb           - Callback function invoked when the FWCMD completes.
+ * cb_context   - Passed to the callback function.
+ * return pend_status - BE_SUCCESS (0) on success.
+ * 			BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *---------------------------------------------------------------------------
+ */
+int
+be_function_get_fw_version(struct be_function_object *pfob,
+		struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
+		mcc_wrb_cqe_callback cb, void *cb_context)
+{
+	int status = BE_SUCCESS;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
+	unsigned long irql;
+	struct be_mcc_wrb_response_copy rc;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		TRACE(DL_ERR, "MCC wrb peek failed.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto Error;
+	}
+
+	if (!cb && !fwv) {
+		TRACE(DL_ERR, "callback and response buffer NULL!");
+		status = BE_NOT_OK;
+		goto Error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
+
+	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
+					params.response);
+	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
+					params.response);
+	rc.va = fwv;
+
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
+				cb_context, NULL, NULL, fwcmd, &rc);
+
+Error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
+
+int
+be_function_queue_mcc_wrb(struct be_function_object *pfob,
+			  struct be_generic_q_ctxt *q_ctxt)
+{
+	int status;
+
+	ASSERT(q_ctxt);
+
+	/*
+	 * issue the WRB to the MPU as appropriate
+	 */
+	if (pfob->mcc) {
+
+		/* We're in ring mode.  Queue this item. */
+		pfob->mcc->backlog_length++;
+		list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
+		status = BE_PENDING;
+	} else {
+		status = BE_NOT_OK;
+	}
+	return status;
+}
+
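
The lock/peek/prepare/post sequence above is the template every FWCMD wrapper
in this file follows. As a usage illustration, here is a minimal sketch of a
synchronous no-op command built from the same helpers; it assumes only the
BE_PREPARE_EMBEDDED_FWCMD macro and the FWCMD_COMMON_NOP structures declared
elsewhere in this series (the function itself is hypothetical, not part of
the driver):

    /* Issue a COMMON_NOP through the mailbox/MCC path.  A NULL callback
     * makes be_function_post_mcc_wrb() complete synchronously, and a NOP
     * has no response payload worth copying, so rc is NULL as well.
     */
    static int be_issue_nop(struct be_function_object *pfob)
    {
            struct FWCMD_COMMON_NOP *fwcmd;
            struct MCC_WRB_AMAP *wrb;
            unsigned long irql;
            int status;

            spin_lock_irqsave(&pfob->post_lock, irql);

            wrb = be_function_peek_mcc_wrb(pfob);
            if (!wrb) {
                    status = BE_STATUS_NO_MCC_WRB;
                    goto error;
            }

            /* Embeds the request in the WRB and fills the common header. */
            fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NOP);
            fwcmd->params.request.context = 0;	/* opaque; echoed back */

            status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
                                              NULL, NULL, fwcmd, NULL);
    error:
            spin_unlock_irqrestore(&pfob->post_lock, irql);
            if (pfob->pend_queue_driving && pfob->mcc) {
                    pfob->pend_queue_driving = 0;
                    be_drive_mcc_wrb_queue(pfob->mcc);
            }
            return status;
    }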

+ 222 - 0
drivers/staging/benet/fwcmd_common.h

@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_common_amap_h__
+#define __fwcmd_common_amap_h__
+#include "host_struct.h"
+
+/* --- PHY_LINK_DUPLEX_ENUM --- */
+#define PHY_LINK_DUPLEX_NONE            (0)
+#define PHY_LINK_DUPLEX_HALF            (1)
+#define PHY_LINK_DUPLEX_FULL            (2)
+
+/* --- PHY_LINK_SPEED_ENUM --- */
+#define PHY_LINK_SPEED_ZERO             (0)	/* No link. */
+#define PHY_LINK_SPEED_10MBPS           (1)	/* 10 Mbps */
+#define PHY_LINK_SPEED_100MBPS          (2)	/* 100 Mbps */
+#define PHY_LINK_SPEED_1GBPS            (3)	/* 1 Gbps */
+#define PHY_LINK_SPEED_10GBPS           (4)	/* 10 Gbps */
+
+/* --- PHY_LINK_FAULT_ENUM --- */
+#define PHY_LINK_FAULT_NONE             (0)	/* No fault status
+							available or detected */
+#define PHY_LINK_FAULT_LOCAL            (1)	/* Local fault detected */
+#define PHY_LINK_FAULT_REMOTE           (2)	/* Remote fault detected */
+
+/* --- BE_ULP_MASK --- */
+#define BE_ULP0_MASK                    (1)
+#define BE_ULP1_MASK                    (2)
+#define BE_ULP2_MASK                    (4)
+
+/* --- NTWK_ACTIVE_PORT --- */
+#define NTWK_PORT_A                     (0)	/* Port A is currently active */
+#define NTWK_PORT_B                     (1)	/* Port B is currently active */
+#define NTWK_NO_ACTIVE_PORT             (15)	/* Both ports have lost link */
+
+/* --- NTWK_LINK_TYPE --- */
+#define NTWK_LINK_TYPE_PHYSICAL         (0)	/* link up/down event
+						   applies to BladeEngine's
+						   Physical Ports
+						   */
+#define NTWK_LINK_TYPE_VIRTUAL          (1)	/* Virtual link up/down event
+						   reported by BladeExchange.
+						   This applies only when the
+						   VLD feature is enabled
+						   */
+
+/*
+ * --- FWCMD_MAC_TYPE_ENUM ---
+ * This enum defines the types of MAC addresses in the RXF MAC Address Table.
+ */
+#define MAC_ADDRESS_TYPE_STORAGE        (0)	/* Storage MAC Address */
+#define MAC_ADDRESS_TYPE_NETWORK        (1)	/* Network MAC Address */
+#define MAC_ADDRESS_TYPE_PD             (2)	/* Protection Domain MAC Addr */
+#define MAC_ADDRESS_TYPE_MANAGEMENT     (3)	/* Management MAC Address */
+
+
+/* --- FWCMD_RING_TYPE_ENUM --- */
+#define FWCMD_RING_TYPE_ETH_RX          (1)	/* Ring created with */
+					/* FWCMD_COMMON_ETH_RX_CREATE. */
+#define FWCMD_RING_TYPE_ETH_TX          (2)	/* Ring created with */
+					/* FWCMD_COMMON_ETH_TX_CREATE. */
+#define FWCMD_RING_TYPE_ISCSI_WRBQ      (3)	/* Ring created with */
+					/* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
+#define FWCMD_RING_TYPE_ISCSI_DEFQ      (4)	/* Ring created with */
+					/* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_WRBQ        (5)	/* Ring created with */
+					/* FWCMD_COMMON_TPM_WRBQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_DEFQ        (6)	/* Ring created with */
+					/* FWCMD_COMMON_TPM_DEFQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_RQ          (7)	/* Ring created with */
+					/* FWCMD_COMMON_TPM_RQ_CREATE. */
+#define FWCMD_RING_TYPE_MCC             (8)	/* Ring created with */
+					/* FWCMD_COMMON_MCC_CREATE. */
+#define FWCMD_RING_TYPE_CQ              (9)	/* Ring created with */
+					/* FWCMD_COMMON_CQ_CREATE. */
+#define FWCMD_RING_TYPE_EQ              (10)	/* Ring created with */
+					/* FWCMD_COMMON_EQ_CREATE. */
+#define FWCMD_RING_TYPE_QP              (11)	/* Ring created with */
+					/* FWCMD_RDMA_QP_CREATE. */
+
+
+/* --- ETH_TX_RING_TYPE_ENUM --- */
+#define ETH_TX_RING_TYPE_FORWARDING     (1)	/* Ethernet ring for
+						   forwarding packets */
+#define ETH_TX_RING_TYPE_STANDARD       (2)	/* Ethernet ring for sending
+						   network packets. */
+#define ETH_TX_RING_TYPE_BOUND          (3)	/* Ethernet ring bound to the
+						   port specified in the command
+						   header.port_number field.
+						   Rings of this type are
+						   NOT subject to the
+						   failover logic implemented
+						   in the BladeEngine.
+						   */
+
+/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
+#define QOS_BITS_NIC                    (1)	/* max_bits_per_second_NIC */
+						  /* field is valid.  */
+#define QOS_PKTS_NIC                    (2)	/* max_packets_per_second_NIC */
+						  /* field is valid.  */
+#define QOS_IOPS_ISCSI                  (4)	/* max_ios_per_second_iSCSI */
+						  /*field is valid.  */
+#define QOS_VLAN_TAG                    (8)	/* domain_VLAN_tag field
+						   is valid. */
+#define QOS_FABRIC_ID                   (16)	/* fabric_domain_ID field
+						   is valid. */
+#define QOS_OEM_PARAMS                  (32)	/* qos_params_oem field
+						   is valid. */
+#define QOS_TPUT_ISCSI                  (64)	/* max_bytes_per_second_iSCSI
+						   field  is valid.  */
+
+
+/*
+ * --- FAILOVER_CONFIG_ENUM ---
+ * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
+ */
+#define FAILOVER_CONFIG_NO_CHANGE       (0)	/* No change to automatic */
+						  /* port failover setting. */
+#define FAILOVER_CONFIG_ON              (1)	/* Automatic port failover
+						   on link down  is enabled. */
+#define FAILOVER_CONFIG_OFF             (2)	/* Automatic port failover
+						   on link down is disabled. */
+
+/*
+ * --- FAILOVER_PORT_ENUM ---
+ * Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
+ */
+#define FAILOVER_PORT_A                 (0)	/* Selects port A. */
+#define FAILOVER_PORT_B                 (1)	/* Selects port B. */
+#define FAILOVER_PORT_NONE              (15)	/* No port change requested. */
+
+
+/*
+ * --- MGMT_FLASHROM_OPCODE ---
+ * Flash ROM operation code
+ */
+#define MGMT_FLASHROM_OPCODE_FLASH      (1)	/* Commit downloaded data
+						   to Flash ROM */
+#define MGMT_FLASHROM_OPCODE_SAVE       (2)	/* Save downloaded data to
+						   ARM's DDR - do not flash */
+#define MGMT_FLASHROM_OPCODE_CLEAR      (3)	/* Erase specified component
+						   from FlashROM */
+#define MGMT_FLASHROM_OPCODE_REPORT     (4)	/* Read specified component
+						   from Flash ROM */
+#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5)	/* Returns size of a
+						   component */
+
+/*
+ * --- MGMT_FLASHROM_OPTYPE ---
+ * Flash ROM operation type
+ */
+#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0)	/* Includes ARM firmware,
+						   IPSec (optional) and EP
+						   firmware  */
+#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
+#define MGMT_FLASHROM_OPTYPE_CODE_BIOS  (2)
+#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
+#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
+#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC  (5)
+#define MGMT_FLASHROM_OPTYPE_CFG_INI    (6)
+#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
+
+/*
+ * --- FLASHROM_TYPE ---
+ * Flash ROM manufacturers supported in the f/w
+ */
+#define INTEL                           (0)
+#define SPANSION                        (1)
+#define MICRON                          (2)
+
+/* --- DDR_CAS_TYPE --- */
+#define CAS_3                           (0)
+#define CAS_4                           (1)
+#define CAS_5                           (2)
+
+/* --- DDR_SIZE_TYPE --- */
+#define SIZE_256MB                      (0)
+#define SIZE_512MB                      (1)
+
+/* --- DDR_MODE_TYPE --- */
+#define DDR_NO_ECC                      (0)
+#define DDR_ECC                         (1)
+
+/* --- INTERFACE_10GB_TYPE --- */
+#define CX4_TYPE                        (0)
+#define XFP_TYPE                        (1)
+
+/* --- BE_CHIP_MAX_MTU --- */
+#define CHIP_MAX_MTU                    (9000)
+
+/* --- XAUI_STATE_ENUM --- */
+#define XAUI_STATE_ENABLE               (0)	/* This MUST be the default
+						   value for all requests
+						   which set/change
+						   equalization parameters. */
+#define XAUI_STATE_DISABLE              (255)	/* The XAUI for both ports
+						   may be disabled for EMI
+						   tests. There is no
+						   provision for turning off
+						   individual ports.
+						   */
+/* --- BE_ASIC_REVISION --- */
+#define BE_ASIC_REV_A0                  (1)
+#define BE_ASIC_REV_A1                  (2)
+
+#endif /* __fwcmd_common_amap_h__ */
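
The duplex/speed values above are raw firmware enumerations; as the header
itself notes, they do not imply hardware support. A small illustrative decode
into plain Mbps numbers, following the comments on each define (the helper is
a sketch, not part of the driver):

    static unsigned int be_link_speed_mbps(u8 phy_speed)
    {
            switch (phy_speed) {
            case PHY_LINK_SPEED_10MBPS:     return 10;
            case PHY_LINK_SPEED_100MBPS:    return 100;
            case PHY_LINK_SPEED_1GBPS:      return 1000;
            case PHY_LINK_SPEED_10GBPS:     return 10000;
            case PHY_LINK_SPEED_ZERO:
            default:                        return 0;	/* no link */
            }
    }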

+ 717 - 0
drivers/staging/benet/fwcmd_common_bmap.h

@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_common_bmap_h__
+#define __fwcmd_common_bmap_h__
+#include "fwcmd_types_bmap.h"
+#include "fwcmd_hdr_bmap.h"
+
+#if defined(__BIG_ENDIAN)
+   /* Physical Address. */
+struct PHYS_ADDR {
+	union {
+		struct {
+			u32 lo;	/* DWORD 0 */
+			u32 hi;	/* DWORD 1 */
+		} __packed;	/* unnamed struct */
+		u32 dw[2];	/* dword union */
+	};			/* unnamed union */
+} __packed ;
+
+
+#else
+   /* Physical Address. */
+struct PHYS_ADDR {
+	union {
+		struct {
+			u32 lo;	/* DWORD 0 */
+			u32 hi;	/* DWORD 1 */
+		} __packed;	/* unnamed struct */
+		u32 dw[2];	/* dword union */
+	};			/* unnamed union */
+} __packed ;
+
+struct BE_LINK_STATUS {
+	u8 mac0_duplex;
+	u8 mac0_speed;
+	u8 mac1_duplex;
+	u8 mac1_speed;
+	u8 mgmt_mac_duplex;
+	u8 mgmt_mac_speed;
+	u8 active_port;
+	u8 rsvd0;
+	u8 mac0_fault;
+	u8 mac1_fault;
+	u16 rsvd1;
+} __packed;
+#endif
+
+struct FWCMD_COMMON_ANON_170_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+union LINK_STATUS_QUERY_PARAMS {
+	struct BE_LINK_STATUS response;
+	struct FWCMD_COMMON_ANON_170_REQUEST request;
+} __packed;
+
+/*
+ *  Queries the link status for all ports.  The valid values below
+ *  DO NOT indicate that  a particular duplex or speed is supported by
+ *  BladeEngine. These enumerations simply  list all possible duplexes
+ *  and speeds for any port. Consult BladeEngine product  documentation
+ *  for the supported parameters.
+ */
+struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
+	union FWCMD_HEADER header;
+	union LINK_STATUS_QUERY_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_171_REQUEST {
+	u8 type;
+	u8 port;
+	u8 mac1;
+	u8 permanent;
+} __packed;
+
+struct FWCMD_COMMON_ANON_172_RESPONSE {
+	struct MAC_ADDRESS_FORMAT mac;
+} __packed;
+
+union NTWK_MAC_QUERY_PARAMS {
+	struct FWCMD_COMMON_ANON_171_REQUEST request;
+	struct FWCMD_COMMON_ANON_172_RESPONSE response;
+} __packed;
+
+/* Queries one MAC address.  */
+struct FWCMD_COMMON_NTWK_MAC_QUERY {
+	union FWCMD_HEADER header;
+	union NTWK_MAC_QUERY_PARAMS params;
+} __packed;
+
+struct MAC_SET_PARAMS_IN {
+	u8 type;
+	u8 port;
+	u8 mac1;
+	u8 invalidate;
+	struct MAC_ADDRESS_FORMAT mac;
+} __packed;
+
+struct MAC_SET_PARAMS_OUT {
+	u32 rsvd0;
+} __packed;
+
+union MAC_SET_PARAMS {
+	struct MAC_SET_PARAMS_IN request;
+	struct MAC_SET_PARAMS_OUT response;
+} __packed;
+
+/* Sets a MAC address.  */
+struct FWCMD_COMMON_NTWK_MAC_SET {
+	union FWCMD_HEADER header;
+	union MAC_SET_PARAMS params;
+} __packed;
+
+/* MAC address list. */
+struct NTWK_MULTICAST_MAC_LIST {
+	u8 byte[6];
+} __packed;
+
+struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
+	u16 num_mac;
+	u8 promiscuous;
+	u8 rsvd0;
+	struct NTWK_MULTICAST_MAC_LIST mac[32];
+} __packed;
+
+struct FWCMD_COMMON_ANON_174_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_173_PARAMS {
+	struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
+	struct FWCMD_COMMON_ANON_174_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets multicast address hash. The MPU will merge the MAC address lists
+ *  from all clients,  including the networking and storage functions.
+ *  This command may fail if the final merged  list of MAC addresses exceeds
+ *  32 entries.
+ */
+struct FWCMD_COMMON_NTWK_MULTICAST_SET {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_173_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
+	u16 num_vlan;
+	u8 promiscuous;
+	u8 rsvd0;
+	u16 vlan_tag[32];
+} __packed;
+
+struct FWCMD_COMMON_ANON_176_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_175_PARAMS {
+	struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
+	struct FWCMD_COMMON_ANON_176_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
+ *  clients, including  the networking and storage functions. This command
+ *  may fail if the final vlan_tag array  (from all functions) is longer
+ *  than 32 entries.
+ */
+struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_175_PARAMS params;
+} __packed;
+
+struct RING_DESTROY_REQUEST {
+	u16 ring_type;
+	u16 id;
+	u8 bypass_flush;
+	u8 rsvd0;
+	u16 rsvd1;
+} __packed;
+
+struct FWCMD_COMMON_ANON_190_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_189_PARAMS {
+	struct RING_DESTROY_REQUEST request;
+	struct FWCMD_COMMON_ANON_190_RESPONSE response;
+} __packed;
+/*
+ *  Command for destroying any ring. The connection(s) using the ring should
+ *  be quiesced  before destroying the ring.
+ */
+struct FWCMD_COMMON_RING_DESTROY {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_189_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_192_REQUEST {
+	u16 num_pages;
+	u16 rsvd0;
+	struct CQ_CONTEXT_AMAP context;
+	struct PHYS_ADDR pages[4];
+} __packed ;
+
+struct FWCMD_COMMON_ANON_193_RESPONSE {
+	u16 cq_id;
+} __packed ;
+
+union FWCMD_COMMON_ANON_191_PARAMS {
+	struct FWCMD_COMMON_ANON_192_REQUEST request;
+	struct FWCMD_COMMON_ANON_193_RESPONSE response;
+} __packed ;
+
+/*
+ *  Command for creating a completion queue. A Completion Queue must span
+ *  at least 1 page and  at most 4 pages. Each completion queue entry
+ *  is 16 bytes regardless of CQ entry format.  Thus the ring must be
+ *  at least 256 entries deep (corresponding to 1 page) and can be at
+ *   most 1024 entries deep (corresponding to 4 pages). The number of
+ *  pages posted must  contain the CQ ring size as encoded in the context.
+ *
+ */
+struct FWCMD_COMMON_CQ_CREATE {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_191_PARAMS params;
+} __packed ;
+
+struct FWCMD_COMMON_ANON_198_REQUEST {
+	u16 num_pages;
+	u16 rsvd0;
+	struct EQ_CONTEXT_AMAP context;
+	struct PHYS_ADDR pages[8];
+} __packed ;
+
+struct FWCMD_COMMON_ANON_199_RESPONSE {
+	u16 eq_id;
+} __packed ;
+
+union FWCMD_COMMON_ANON_197_PARAMS {
+	struct FWCMD_COMMON_ANON_198_REQUEST request;
+	struct FWCMD_COMMON_ANON_199_RESPONSE response;
+} __packed ;
+
+/*
+ *  Command for creating an event queue. An Event Queue must span at least
+ *  1 page and at most  8 pages. The number of pages posted must contain
+ *  the EQ ring. The ring is defined by  the size of the EQ entries (encoded
+ *  in the context) and the number of EQ entries (also  encoded in the
+ *  context).
+ */
+struct FWCMD_COMMON_EQ_CREATE {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_197_PARAMS params;
+} __packed ;
+
+struct FWCMD_COMMON_ANON_201_REQUEST {
+	u16 cq_id;
+	u16 bcmc_cq_id;
+	u16 num_pages;
+	u16 rsvd0;
+	struct PHYS_ADDR pages[2];
+} __packed;
+
+struct FWCMD_COMMON_ANON_202_RESPONSE {
+	u16 id;
+} __packed;
+
+union FWCMD_COMMON_ANON_200_PARAMS {
+	struct FWCMD_COMMON_ANON_201_REQUEST request;
+	struct FWCMD_COMMON_ANON_202_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating an Ethernet receive ring.  An ERX ring contains ETH_RX_D
+ *  entries (8  bytes each). An ERX ring must be 1024 entries deep
+ *  (corresponding to 2 pages).
+ */
+struct FWCMD_COMMON_ETH_RX_CREATE {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_200_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_204_REQUEST {
+	u16 num_pages;
+	u8 ulp_num;
+	u8 type;
+	struct ETX_CONTEXT_AMAP context;
+	struct PHYS_ADDR pages[8];
+} __packed ;
+
+struct FWCMD_COMMON_ANON_205_RESPONSE {
+	u16 cid;
+	u8 ulp_num;
+	u8 rsvd0;
+} __packed ;
+
+union FWCMD_COMMON_ANON_203_PARAMS {
+	struct FWCMD_COMMON_ANON_204_REQUEST request;
+	struct FWCMD_COMMON_ANON_205_RESPONSE response;
+} __packed ;
+
+/*
+ *  Command for creating an Ethernet transmit ring.  An ETX ring contains
+ *  ETH_WRB entries (16  bytes each). An ETX ring must be at least 256
+ *  entries deep (corresponding to 1 page)  and at most 2k entries deep
+ *  (corresponding to 8 pages).
+ */
+struct FWCMD_COMMON_ETH_TX_CREATE {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_203_PARAMS params;
+} __packed ;
+
+struct FWCMD_COMMON_ANON_222_REQUEST {
+	u16 num_pages;
+	u16 rsvd0;
+	struct MCC_RING_CONTEXT_AMAP context;
+	struct PHYS_ADDR pages[8];
+} __packed ;
+
+struct FWCMD_COMMON_ANON_223_RESPONSE {
+	u16 id;
+} __packed ;
+
+union FWCMD_COMMON_ANON_221_PARAMS {
+	struct FWCMD_COMMON_ANON_222_REQUEST request;
+	struct FWCMD_COMMON_ANON_223_RESPONSE response;
+} __packed ;
+
+/*
+ *  Command for creating the MCC ring. An MCC ring must be at least 16
+ *  entries deep  (corresponding to 1 page) and at most 128 entries deep
+ *  (corresponding to 8 pages).
+ */
+struct FWCMD_COMMON_MCC_CREATE {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_221_PARAMS params;
+} __packed ;
+
+struct GET_QOS_IN {
+	u32 qos_params_rsvd;
+} __packed;
+
+struct GET_QOS_OUT {
+	u32 max_bits_per_second_NIC;
+	u32 max_packets_per_second_NIC;
+	u32 max_ios_per_second_iSCSI;
+	u32 max_bytes_per_second_iSCSI;
+	u16 domain_VLAN_tag;
+	u16 fabric_domain_ID;
+	u32 qos_params_oem[4];
+} __packed;
+
+union GET_QOS_PARAMS {
+	struct GET_QOS_IN request;
+	struct GET_QOS_OUT response;
+} __packed;
+
+/* QOS/Bandwidth settings per domain. Applicable only in VMs.  */
+struct FWCMD_COMMON_GET_QOS {
+	union FWCMD_HEADER header;
+	union GET_QOS_PARAMS params;
+} __packed;
+
+struct SET_QOS_IN {
+	u32 valid_flags;
+	u32 max_bits_per_second_NIC;
+	u32 max_packets_per_second_NIC;
+	u32 max_ios_per_second_iSCSI;
+	u32 max_bytes_per_second_iSCSI;
+	u16 domain_VLAN_tag;
+	u16 fabric_domain_ID;
+	u32 qos_params_oem[4];
+} __packed;
+
+struct SET_QOS_OUT {
+	u32 qos_params_rsvd;
+} __packed;
+
+union SET_QOS_PARAMS {
+	struct SET_QOS_IN request;
+	struct SET_QOS_OUT response;
+} __packed;
+
+/* QOS/Bandwidth settings per domain. Applicable only in VMs.  */
+struct FWCMD_COMMON_SET_QOS {
+	union FWCMD_HEADER header;
+	union SET_QOS_PARAMS params;
+} __packed;
+
+struct SET_FRAME_SIZE_IN {
+	u32 max_tx_frame_size;
+	u32 max_rx_frame_size;
+} __packed;
+
+struct SET_FRAME_SIZE_OUT {
+	u32 chip_max_tx_frame_size;
+	u32 chip_max_rx_frame_size;
+} __packed;
+
+union SET_FRAME_SIZE_PARAMS {
+	struct SET_FRAME_SIZE_IN request;
+	struct SET_FRAME_SIZE_OUT response;
+} __packed;
+
+/* Set frame size command. Only host domain may issue this command.  */
+struct FWCMD_COMMON_SET_FRAME_SIZE {
+	union FWCMD_HEADER header;
+	union SET_FRAME_SIZE_PARAMS params;
+} __packed;
+
+struct FORCE_FAILOVER_IN {
+	u32 move_to_port;
+	u32 failover_config;
+} __packed;
+
+struct FWCMD_COMMON_ANON_231_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_230_PARAMS {
+	struct FORCE_FAILOVER_IN request;
+	struct FWCMD_COMMON_ANON_231_RESPONSE response;
+} __packed;
+
+/*
+ *  Use this command to control failover in BladeEngine. It may be used
+ *  to failback to a  restored port or to forcibly move traffic from
+ *  one port to another. It may also be used  to enable or disable the
+ *  automatic failover feature. This command can only be issued by  domain
+ *  0.
+ */
+struct FWCMD_COMMON_FORCE_FAILOVER {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_230_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_240_REQUEST {
+	u64 context;
+} __packed;
+
+struct FWCMD_COMMON_ANON_241_RESPONSE {
+	u64 context;
+} __packed;
+
+union FWCMD_COMMON_ANON_239_PARAMS {
+	struct FWCMD_COMMON_ANON_240_REQUEST request;
+	struct FWCMD_COMMON_ANON_241_RESPONSE response;
+} __packed;
+
+/*
+ *  This command can be used by clients as a no-operation request. Typical
+ *  uses for drivers  are as a heartbeat mechanism, or deferred processing
+ *  catalyst. The ARM will always  complete this command with a good completion.
+ *  The 64-bit parameter is not touched by the  ARM processor.
+ */
+struct FWCMD_COMMON_NOP {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_239_PARAMS params;
+} __packed;
+
+struct NTWK_RX_FILTER_SETTINGS {
+	u8 promiscuous;
+	u8 ip_cksum;
+	u8 tcp_cksum;
+	u8 udp_cksum;
+	u8 pass_err;
+	u8 pass_ckerr;
+	u8 strip_crc;
+	u8 mcast_en;
+	u8 bcast_en;
+	u8 mcast_promiscuous_en;
+	u8 unicast_en;
+	u8 vlan_promiscuous;
+} __packed;
+
+union FWCMD_COMMON_ANON_242_PARAMS {
+	struct NTWK_RX_FILTER_SETTINGS request;
+	struct NTWK_RX_FILTER_SETTINGS response;
+} __packed;
+
+/*
+ *  This command is used to modify the ethernet receive filter configuration.
+ *  Only domain 0  network function drivers may issue this command. The
+ *  applied configuration is returned in  the response payload. Note:
+ *  Some receive packet filter settings are global on  BladeEngine and
+ *  can affect both the storage and network function clients that the
+ *   BladeEngine hardware and firmware serve. Additionally, depending
+ *  on the revision of  BladeEngine, some ethernet receive filter settings
+ *  are dependent on others. If a  dependency exists between settings
+ *  for the BladeEngine revision, and the command request  settings do
+ *  not meet the dependency requirement, the invalid settings will not
+ *  be  applied despite the command succeeding. For example: a driver may
+ *  request to enable  broadcast packets, but not enable multicast packets.
+ *  On early revisions of BladeEngine,  there may be no distinction between
+ *  broadcast and multicast filters, so broadcast could  not be enabled
+ *  without enabling multicast. In this scenario, the command would still
+ *   succeed, but the response payload would indicate the previously
+ *  configured broadcast  and multicast setting.
+ */
+struct FWCMD_COMMON_NTWK_RX_FILTER {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_242_PARAMS params;
+} __packed;
+
+
+struct FWCMD_COMMON_ANON_244_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
+	u8 firmware_version_string[32];
+	u8 fw_on_flash_version_string[32];
+} __packed;
+
+union FWCMD_COMMON_ANON_243_PARAMS {
+	struct FWCMD_COMMON_ANON_244_REQUEST request;
+	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
+} __packed;
+
+/* This command retrieves the firmware version.  */
+struct FWCMD_COMMON_GET_FW_VERSION {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_243_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_246_REQUEST {
+	u16 tx_flow_control;
+	u16 rx_flow_control;
+} __packed;
+
+struct FWCMD_COMMON_ANON_247_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_245_PARAMS {
+	struct FWCMD_COMMON_ANON_246_REQUEST request;
+	struct FWCMD_COMMON_ANON_247_RESPONSE response;
+} __packed;
+
+/*
+ *  This command is used to program BladeEngine flow control behavior.
+ *  Only the host  networking driver is allowed to use this command.
+ */
+struct FWCMD_COMMON_SET_FLOW_CONTROL {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_245_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_249_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+struct FWCMD_COMMON_ANON_250_RESPONSE {
+	u16 tx_flow_control;
+	u16 rx_flow_control;
+} __packed;
+
+union FWCMD_COMMON_ANON_248_PARAMS {
+	struct FWCMD_COMMON_ANON_249_REQUEST request;
+	struct FWCMD_COMMON_ANON_250_RESPONSE response;
+} __packed;
+
+/* This command is used to read BladeEngine flow control settings.  */
+struct FWCMD_COMMON_GET_FLOW_CONTROL {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_248_PARAMS params;
+} __packed;
+
+struct EQ_DELAY_PARAMS {
+	u32 eq_id;
+	u32 delay_in_microseconds;
+} __packed;
+
+struct FWCMD_COMMON_ANON_257_REQUEST {
+	u32 num_eq;
+	u32 rsvd0;
+	struct EQ_DELAY_PARAMS delay[16];
+} __packed;
+
+struct FWCMD_COMMON_ANON_258_RESPONSE {
+	u32 delay_resolution_in_microseconds;
+	u32 delay_max_in_microseconds;
+} __packed;
+
+union MODIFY_EQ_DELAY_PARAMS {
+	struct FWCMD_COMMON_ANON_257_REQUEST request;
+	struct FWCMD_COMMON_ANON_258_RESPONSE response;
+} __packed;
+
+/* This command changes the EQ delay for a given set of EQs.  */
+struct FWCMD_COMMON_MODIFY_EQ_DELAY {
+	union FWCMD_HEADER header;
+	union MODIFY_EQ_DELAY_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_260_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+struct BE_FIRMWARE_CONFIG {
+	u16 be_config_number;
+	u16 asic_revision;
+	u32 nic_ulp_mask;
+	u32 tulp_mask;
+	u32 iscsi_ulp_mask;
+	u32 rdma_ulp_mask;
+	u32 rsvd0[4];
+	u32 eth_tx_id_start;
+	u32 eth_tx_id_count;
+	u32 eth_rx_id_start;
+	u32 eth_rx_id_count;
+	u32 tpm_wrbq_id_start;
+	u32 tpm_wrbq_id_count;
+	u32 tpm_defq_id_start;
+	u32 tpm_defq_id_count;
+	u32 iscsi_wrbq_id_start;
+	u32 iscsi_wrbq_id_count;
+	u32 iscsi_defq_id_start;
+	u32 iscsi_defq_id_count;
+	u32 rdma_qp_id_start;
+	u32 rdma_qp_id_count;
+	u32 rsvd1[8];
+} __packed;
+
+union FWCMD_COMMON_ANON_259_PARAMS {
+	struct FWCMD_COMMON_ANON_260_REQUEST request;
+	struct BE_FIRMWARE_CONFIG response;
+} __packed;
+
+/*
+ *  This command queries the current firmware configuration parameters.
+ *   The static  configuration type is defined by be_config_number. This
+ *  differentiates different  BladeEngine builds, such as iSCSI Initiator
+ *  versus iSCSI Target.  For a given static  configuration, the Upper
+ *  Layer Protocol (ULP) processors may be reconfigured to support  different
+ *  protocols. Each ULP processor supports one or more protocols. The
+ *  masks  indicate which processors are configured for each protocol.
+ *   For a given static  configuration, the number of TCP connections
+ *  supported for each protocol may vary. The  *_id_start and *_id_count
+ *  variables define a linear range of IDs that are available for  each
+ *  supported protocol. The *_id_count may be used by the driver to allocate
+ *  the  appropriate number of connection resources. The *_id_start may
+ *  be used to map the  arbitrary range of IDs to a zero-based range
+ *  of indices.
+ */
+struct FWCMD_COMMON_FIRMWARE_CONFIG {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_259_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
+	u32 emph_lev_sel_port0;
+	u32 emph_lev_sel_port1;
+	u8 xaui_vo_sel;
+	u8 xaui_state;
+	u16 rsvd0;
+	u32 xaui_eq_vector;
+} __packed;
+
+struct FWCMD_COMMON_ANON_262_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_261_PARAMS {
+	struct FWCMD_COMMON_ANON_262_REQUEST request;
+	struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
+} __packed;
+
+/*
+ *  This command can be used to read XAUI equalization parameters. The
+ *  ARM firmware applies  default equalization parameters during initialization.
+ *  These parameters may be  customer-specific when derived from the
+ *  SEEPROM. See SEEPROM_DATA for equalization  specific fields.
+ */
+struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_261_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_264_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_263_PARAMS {
+	struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
+	struct FWCMD_COMMON_ANON_264_RESPONSE response;
+} __packed;
+
+/*
+ *  This command can be used to set XAUI equalization parameters. The ARM
+ *  firmware applies  default equalization parameters during initialization.
+ *  These parameters may be  customer-specific when derived from the
+ *  SEEPROM. See SEEPROM_DATA for equalization  specific fields.
+ */
+struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
+	union FWCMD_HEADER header;
+	union FWCMD_COMMON_ANON_263_PARAMS params;
+} __packed;
+
+#endif /* __fwcmd_common_bmap_h__ */
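
Every FWCMD in this header pairs a request and a response inside one params
union, so the caller builds the request in the same memory that the firmware
later overwrites with the response. A sketch of filling the multicast-set
request, assuming <linux/string.h> for memcpy (the helper is hypothetical,
not part of the driver):

    /* Populate a FWCMD_COMMON_NTWK_MULTICAST_SET request in place.  The
     * firmware merges lists from all clients and the merged list may not
     * exceed 32 entries, so the local list is capped at 32 as well.
     */
    static void be_fill_multicast_set(struct FWCMD_COMMON_NTWK_MULTICAST_SET *cmd,
                                      const u8 (*addrs)[6], u16 count)
    {
            struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD *req =
                    &cmd->params.request;
            u16 i;

            if (count > 32)
                    count = 32;

            req->num_mac = count;
            req->promiscuous = 0;
            for (i = 0; i < count; i++)
                    memcpy(req->mac[i].byte, addrs[i], 6);
    }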

+ 280 - 0
drivers/staging/benet/fwcmd_eth_bmap.h

@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_eth_bmap_h__
+#define __fwcmd_eth_bmap_h__
+#include "fwcmd_hdr_bmap.h"
+#include "fwcmd_types_bmap.h"
+
+struct MIB_ETH_STATISTICS_PARAMS_IN {
+	u32 rsvd0;
+} __packed;
+
+struct BE_RXF_STATS {
+	u32 p0recvdtotalbytesLSD;	/* DWORD 0 */
+	u32 p0recvdtotalbytesMSD;	/* DWORD 1 */
+	u32 p0recvdtotalframes;	/* DWORD 2 */
+	u32 p0recvdunicastframes;	/* DWORD 3 */
+	u32 p0recvdmulticastframes;	/* DWORD 4 */
+	u32 p0recvdbroadcastframes;	/* DWORD 5 */
+	u32 p0crcerrors;	/* DWORD 6 */
+	u32 p0alignmentsymerrs;	/* DWORD 7 */
+	u32 p0pauseframesrecvd;	/* DWORD 8 */
+	u32 p0controlframesrecvd;	/* DWORD 9 */
+	u32 p0inrangelenerrors;	/* DWORD 10 */
+	u32 p0outrangeerrors;	/* DWORD 11 */
+	u32 p0frametoolongerrors;	/* DWORD 12 */
+	u32 p0droppedaddressmatch;	/* DWORD 13 */
+	u32 p0droppedvlanmismatch;	/* DWORD 14 */
+	u32 p0ipdroppedtoosmall;	/* DWORD 15 */
+	u32 p0ipdroppedtooshort;	/* DWORD 16 */
+	u32 p0ipdroppedhdrtoosmall;	/* DWORD 17 */
+	u32 p0tcpdroppedlen;	/* DWORD 18 */
+	u32 p0droppedrunt;	/* DWORD 19 */
+	u32 p0recvd64;		/* DWORD 20 */
+	u32 p0recvd65_127;	/* DWORD 21 */
+	u32 p0recvd128_256;	/* DWORD 22 */
+	u32 p0recvd256_511;	/* DWORD 23 */
+	u32 p0recvd512_1023;	/* DWORD 24 */
+	u32 p0recvd1518_1522;	/* DWORD 25 */
+	u32 p0recvd1522_2047;	/* DWORD 26 */
+	u32 p0recvd2048_4095;	/* DWORD 27 */
+	u32 p0recvd4096_8191;	/* DWORD 28 */
+	u32 p0recvd8192_9216;	/* DWORD 29 */
+	u32 p0rcvdipcksmerrs;	/* DWORD 30 */
+	u32 p0recvdtcpcksmerrs;	/* DWORD 31 */
+	u32 p0recvdudpcksmerrs;	/* DWORD 32 */
+	u32 p0recvdnonrsspackets;	/* DWORD 33 */
+	u32 p0recvdippackets;	/* DWORD 34 */
+	u32 p0recvdchute1packets;	/* DWORD 35 */
+	u32 p0recvdchute2packets;	/* DWORD 36 */
+	u32 p0recvdchute3packets;	/* DWORD 37 */
+	u32 p0recvdipsecpackets;	/* DWORD 38 */
+	u32 p0recvdmanagementpackets;	/* DWORD 39 */
+	u32 p0xmitbyteslsd;	/* DWORD 40 */
+	u32 p0xmitbytesmsd;	/* DWORD 41 */
+	u32 p0xmitunicastframes;	/* DWORD 42 */
+	u32 p0xmitmulticastframes;	/* DWORD 43 */
+	u32 p0xmitbroadcastframes;	/* DWORD 44 */
+	u32 p0xmitpauseframes;	/* DWORD 45 */
+	u32 p0xmitcontrolframes;	/* DWORD 46 */
+	u32 p0xmit64;		/* DWORD 47 */
+	u32 p0xmit65_127;	/* DWORD 48 */
+	u32 p0xmit128_256;	/* DWORD 49 */
+	u32 p0xmit256_511;	/* DWORD 50 */
+	u32 p0xmit512_1023;	/* DWORD 51 */
+	u32 p0xmit1518_1522;	/* DWORD 52 */
+	u32 p0xmit1522_2047;	/* DWORD 53 */
+	u32 p0xmit2048_4095;	/* DWORD 54 */
+	u32 p0xmit4096_8191;	/* DWORD 55 */
+	u32 p0xmit8192_9216;	/* DWORD 56 */
+	u32 p0rxfifooverflowdropped;	/* DWORD 57 */
+	u32 p0ipseclookupfaileddropped;	/* DWORD 58 */
+	u32 p1recvdtotalbytesLSD;	/* DWORD 59 */
+	u32 p1recvdtotalbytesMSD;	/* DWORD 60 */
+	u32 p1recvdtotalframes;	/* DWORD 61 */
+	u32 p1recvdunicastframes;	/* DWORD 62 */
+	u32 p1recvdmulticastframes;	/* DWORD 63 */
+	u32 p1recvdbroadcastframes;	/* DWORD 64 */
+	u32 p1crcerrors;	/* DWORD 65 */
+	u32 p1alignmentsymerrs;	/* DWORD 66 */
+	u32 p1pauseframesrecvd;	/* DWORD 67 */
+	u32 p1controlframesrecvd;	/* DWORD 68 */
+	u32 p1inrangelenerrors;	/* DWORD 69 */
+	u32 p1outrangeerrors;	/* DWORD 70 */
+	u32 p1frametoolongerrors;	/* DWORD 71 */
+	u32 p1droppedaddressmatch;	/* DWORD 72 */
+	u32 p1droppedvlanmismatch;	/* DWORD 73 */
+	u32 p1ipdroppedtoosmall;	/* DWORD 74 */
+	u32 p1ipdroppedtooshort;	/* DWORD 75 */
+	u32 p1ipdroppedhdrtoosmall;	/* DWORD 76 */
+	u32 p1tcpdroppedlen;	/* DWORD 77 */
+	u32 p1droppedrunt;	/* DWORD 78 */
+	u32 p1recvd64;		/* DWORD 79 */
+	u32 p1recvd65_127;	/* DWORD 80 */
+	u32 p1recvd128_256;	/* DWORD 81 */
+	u32 p1recvd256_511;	/* DWORD 82 */
+	u32 p1recvd512_1023;	/* DWORD 83 */
+	u32 p1recvd1518_1522;	/* DWORD 84 */
+	u32 p1recvd1522_2047;	/* DWORD 85 */
+	u32 p1recvd2048_4095;	/* DWORD 86 */
+	u32 p1recvd4096_8191;	/* DWORD 87 */
+	u32 p1recvd8192_9216;	/* DWORD 88 */
+	u32 p1rcvdipcksmerrs;	/* DWORD 89 */
+	u32 p1recvdtcpcksmerrs;	/* DWORD 90 */
+	u32 p1recvdudpcksmerrs;	/* DWORD 91 */
+	u32 p1recvdnonrsspackets;	/* DWORD 92 */
+	u32 p1recvdippackets;	/* DWORD 93 */
+	u32 p1recvdchute1packets;	/* DWORD 94 */
+	u32 p1recvdchute2packets;	/* DWORD 95 */
+	u32 p1recvdchute3packets;	/* DWORD 96 */
+	u32 p1recvdipsecpackets;	/* DWORD 97 */
+	u32 p1recvdmanagementpackets;	/* DWORD 98 */
+	u32 p1xmitbyteslsd;	/* DWORD 99 */
+	u32 p1xmitbytesmsd;	/* DWORD 100 */
+	u32 p1xmitunicastframes;	/* DWORD 101 */
+	u32 p1xmitmulticastframes;	/* DWORD 102 */
+	u32 p1xmitbroadcastframes;	/* DWORD 103 */
+	u32 p1xmitpauseframes;	/* DWORD 104 */
+	u32 p1xmitcontrolframes;	/* DWORD 105 */
+	u32 p1xmit64;		/* DWORD 106 */
+	u32 p1xmit65_127;	/* DWORD 107 */
+	u32 p1xmit128_256;	/* DWORD 108 */
+	u32 p1xmit256_511;	/* DWORD 109 */
+	u32 p1xmit512_1023;	/* DWORD 110 */
+	u32 p1xmit1518_1522;	/* DWORD 111 */
+	u32 p1xmit1522_2047;	/* DWORD 112 */
+	u32 p1xmit2048_4095;	/* DWORD 113 */
+	u32 p1xmit4096_8191;	/* DWORD 114 */
+	u32 p1xmit8192_9216;	/* DWORD 115 */
+	u32 p1rxfifooverflowdropped;	/* DWORD 116 */
+	u32 p1ipseclookupfaileddropped;	/* DWORD 117 */
+	u32 pxdroppednopbuf;	/* DWORD 118 */
+	u32 pxdroppednotxpb;	/* DWORD 119 */
+	u32 pxdroppednoipsecbuf;	/* DWORD 120 */
+	u32 pxdroppednoerxdescr;	/* DWORD 121 */
+	u32 pxdroppednotpredescr;	/* DWORD 122 */
+	u32 pxrecvdmanagementportpackets;	/* DWORD 123 */
+	u32 pxrecvdmanagementportbytes;	/* DWORD 124 */
+	u32 pxrecvdmanagementportpauseframes;	/* DWORD 125 */
+	u32 pxrecvdmanagementporterrors;	/* DWORD 126 */
+	u32 pxxmitmanagementportpackets;	/* DWORD 127 */
+	u32 pxxmitmanagementportbytes;	/* DWORD 128 */
+	u32 pxxmitmanagementportpause;	/* DWORD 129 */
+	u32 pxxmitmanagementportrxfifooverflow;	/* DWORD 130 */
+	u32 pxrecvdipsecipcksmerrs;	/* DWORD 131 */
+	u32 pxrecvdtcpsecipcksmerrs;	/* DWORD 132 */
+	u32 pxrecvdudpsecipcksmerrs;	/* DWORD 133 */
+	u32 pxipsecrunt;	/* DWORD 134 */
+	u32 pxipsecaddressmismatchdropped;	/* DWORD 135 */
+	u32 pxipsecrxfifooverflowdropped;	/* DWORD 136 */
+	u32 pxipsecframestoolong;	/* DWORD 137 */
+	u32 pxipsectotalipframes;	/* DWORD 138 */
+	u32 pxipseciptoosmall;	/* DWORD 139 */
+	u32 pxipseciptooshort;	/* DWORD 140 */
+	u32 pxipseciphdrtoosmall;	/* DWORD 141 */
+	u32 pxipsectcphdrbad;	/* DWORD 142 */
+	u32 pxrecvdipsecchute1;	/* DWORD 143 */
+	u32 pxrecvdipsecchute2;	/* DWORD 144 */
+	u32 pxrecvdipsecchute3;	/* DWORD 145 */
+	u32 pxdropped7frags;	/* DWORD 146 */
+	u32 pxdroppedfrags;	/* DWORD 147 */
+	u32 pxdroppedinvalidfragring;	/* DWORD 148 */
+	u32 pxnumforwardedpackets;	/* DWORD 149 */
+} __packed;
+
+union MIB_ETH_STATISTICS_PARAMS {
+	struct MIB_ETH_STATISTICS_PARAMS_IN request;
+	struct BE_RXF_STATS response;
+} __packed;
+
+/*
+ *  Query ethernet statistics. All domains may issue this command. The
+ *  host domain drivers  may optionally reset internal statistic counters
+ *  with a query.
+ */
+struct FWCMD_ETH_GET_STATISTICS {
+	union FWCMD_HEADER header;
+	union MIB_ETH_STATISTICS_PARAMS params;
+} __packed;
+
+
+struct FWCMD_ETH_ANON_175_REQUEST {
+	u8 port0_promiscuous;
+	u8 port1_promiscuous;
+	u16 rsvd0;
+} __packed;
+
+struct FWCMD_ETH_ANON_176_RESPONSE {
+	u32 rsvd0;
+} __packed;
+
+union FWCMD_ETH_ANON_174_PARAMS {
+	struct FWCMD_ETH_ANON_175_REQUEST request;
+	struct FWCMD_ETH_ANON_176_RESPONSE response;
+} __packed;
+
+/* Enables/Disables promiscuous ethernet receive mode.  */
+struct FWCMD_ETH_PROMISCUOUS {
+	union FWCMD_HEADER header;
+	union FWCMD_ETH_ANON_174_PARAMS params;
+} __packed;
+
+struct FWCMD_ETH_ANON_178_REQUEST {
+	u32 new_fragsize_log2;
+} __packed;
+
+struct FWCMD_ETH_ANON_179_RESPONSE {
+	u32 actual_fragsize_log2;
+} __packed;
+
+union FWCMD_ETH_ANON_177_PARAMS {
+	struct FWCMD_ETH_ANON_178_REQUEST request;
+	struct FWCMD_ETH_ANON_179_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets the Ethernet RX fragment size. Only host (domain 0) networking
+ *  drivers may issue  this command.  This call will fail for non-host
+ *  protection domains. In this situation the  MCC CQ status will indicate
+ *  a failure due to insufficient privileges. The response  should be
+ *  ignored, and the driver should use the FWCMD_ETH_GET_FRAG_SIZE to
+ *  query the  existing ethernet receive fragment size. It must use this
+ *  fragment size for all  fragments in the ethernet receive ring.  If
+ *  the command succeeds, the driver must use the  frag size indicated
+ *  in the command response since the requested frag size may not be  applied
+ *  until the next reboot. When the requested fragsize matches the response
+ *   fragsize, this indicates the request was applied immediately.
+ */
+struct FWCMD_ETH_SET_RX_FRAG_SIZE {
+	union FWCMD_HEADER header;
+	union FWCMD_ETH_ANON_177_PARAMS params;
+} __packed;
+
+struct FWCMD_ETH_ANON_181_REQUEST {
+	u32 rsvd0;
+} __packed;
+
+struct FWCMD_ETH_ANON_182_RESPONSE {
+	u32 actual_fragsize_log2;
+} __packed;
+
+union FWCMD_ETH_ANON_180_PARAMS {
+	struct FWCMD_ETH_ANON_181_REQUEST request;
+	struct FWCMD_ETH_ANON_182_RESPONSE response;
+} __packed;
+
+/*
+ *  Queries the Ethernet RX fragment size. All domains may issue this
+ *  command.  The driver  should call this command to determine the minimum
+ *  required fragment size for the ethernet  RX ring buffers. Drivers
+ *  may choose to use a larger size for each fragment buffer, but  BladeEngine
+ *  will use up to the configured minimum required fragsize in each ethernet
+ *   receive fragment buffer. For example, if the ethernet receive fragment
+ *  size is  configured to 4kB, and a driver uses 8kB fragments, a 6kB
+ *  ethernet packet received by  BladeEngine will be split accross two
+ *  of the driver's receive framgents (4kB in one  fragment buffer, and
+ *  2kB in the subsequent fragment buffer).
+ */
+struct FWCMD_ETH_GET_RX_FRAG_SIZE {
+	union FWCMD_HEADER header;
+	union FWCMD_ETH_ANON_180_PARAMS params;
+} __packed;
+
+#endif /* __fwcmd_eth_bmap_h__ */
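
The RX fragment size commands above exchange sizes as log2 of the byte count
(4096-byte fragments are encoded as 12). Below is a sketch of the conversion
a driver might do before issuing FWCMD_ETH_SET_RX_FRAG_SIZE; per the comment
above, the driver must then honor actual_fragsize_log2 from the response
rather than the value it requested (the helper is hypothetical):

    /* Round a byte count up to the next power of two and return its log2,
     * the encoding used by new_fragsize_log2/actual_fragsize_log2.
     */
    static u32 be_bytes_to_fragsize_log2(u32 bytes)
    {
            u32 log2 = 0;

            while ((1u << log2) < bytes)
                    log2++;
            return log2;	/* e.g. 4096 -> 12 */
    }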

+ 54 - 0
drivers/staging/benet/fwcmd_hdr_bmap.h

@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_hdr_bmap_h__
+#define __fwcmd_hdr_bmap_h__
+
+struct FWCMD_REQUEST_HEADER {
+	u8 opcode;
+	u8 subsystem;
+	u8 port_number;
+	u8 domain;
+	u32 timeout;
+	u32 request_length;
+	u32 rsvd0;
+} __packed;
+
+struct FWCMD_RESPONSE_HEADER {
+	u8 opcode;
+	u8 subsystem;
+	u8 rsvd0;
+	u8 domain;
+	u8 status;
+	u8 additional_status;
+	u16 rsvd1;
+	u32 response_length;
+	u32 actual_response_length;
+} __packed;
+
+/*
+ *  The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
+ *  the output  FWCMD_RESPONSE_HEADER.
+ */
+union FWCMD_HEADER {
+	struct FWCMD_REQUEST_HEADER request;
+	struct FWCMD_RESPONSE_HEADER response;
+} __packed;
+
+#endif /* __fwcmd_hdr_bmap_h__ */
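
Because union FWCMD_HEADER overlays the request and response headers, the
bytes that carried opcode/subsystem on the way down hold status on the way
back. A sketch of the two halves of that round trip, assuming a status of
zero means success (both helpers are hypothetical, not part of the driver):

    static void be_init_fwcmd_header(union FWCMD_HEADER *hdr, u8 opcode,
                                     u8 subsystem, u32 body_len)
    {
            hdr->request.opcode = opcode;
            hdr->request.subsystem = subsystem;
            hdr->request.port_number = 0;
            hdr->request.domain = 0;
            hdr->request.timeout = 0;
            hdr->request.request_length = body_len;
    }

    /* After completion the same storage is a FWCMD_RESPONSE_HEADER. */
    static int be_fwcmd_succeeded(const union FWCMD_HEADER *hdr)
    {
            return hdr->response.status == 0;
    }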

+ 94 - 0
drivers/staging/benet/fwcmd_mcc.h

@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_mcc_amap_h__
+#define __fwcmd_mcc_amap_h__
+#include "fwcmd_opcodes.h"
+/*
+ * Where applicable, a WRB, may contain a list of Scatter-gather elements.
+ * Each element supports a 64-bit address and a 32-bit length field.
+ */
+struct BE_MCC_SGE_AMAP {
+	u8 pa_lo[32];	/* DWORD 0 */
+	u8 pa_hi[32];	/* DWORD 1 */
+	u8 length[32];	/* DWORD 2 */
+} __packed;
+struct MCC_SGE_AMAP {
+	u32 dw[3];
+};
+/*
+ * The design of an MCC_SGE allows up to 19 elements to be embedded
+ * in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
+ */
+struct BE_MCC_WRB_PAYLOAD_AMAP {
+	union {
+		struct BE_MCC_SGE_AMAP sgl[19];
+		u8 embedded[59][32];	/* DWORD 0 */
+	};
+} __packed;
+struct MCC_WRB_PAYLOAD_AMAP {
+	u32 dw[59];
+};
+
+/*
+ * This is the structure of the MCC Command WRB for commands
+ * sent to the Management Processing Unit (MPU). See section
+ * for usage in embedded and non-embedded modes.
+ */
+struct BE_MCC_WRB_AMAP {
+	u8 embedded;	/* DWORD 0 */
+	u8 rsvd0[2];	/* DWORD 0 */
+	u8 sge_count[5];	/* DWORD 0 */
+	u8 rsvd1[16];	/* DWORD 0 */
+	u8 special[8];	/* DWORD 0 */
+	u8 payload_length[32];	/* DWORD 1 */
+	u8 tag[2][32];	/* DWORD 2 */
+	u8 rsvd2[32];	/* DWORD 4 */
+	struct BE_MCC_WRB_PAYLOAD_AMAP payload;
+} __packed;
+struct MCC_WRB_AMAP {
+	u32 dw[64];
+};
+
+/*  This is the structure of the MCC Completion queue entry  */
+struct BE_MCC_CQ_ENTRY_AMAP {
+	u8 completion_status[16];	/* DWORD 0 */
+	u8 extended_status[16];	/* DWORD 0 */
+	u8 mcc_tag[2][32];	/* DWORD 1 */
+	u8 rsvd0[27];	/* DWORD 3 */
+	u8 consumed;	/* DWORD 3 */
+	u8 completed;	/* DWORD 3 */
+	u8 hpi_buffer_completion;	/* DWORD 3 */
+	u8 async_event;	/* DWORD 3 */
+	u8 valid;		/* DWORD 3 */
+} __packed;
+struct MCC_CQ_ENTRY_AMAP {
+	u32 dw[4];
+};
+
+/* Mailbox structures used by the MPU during bootstrap */
+struct BE_MCC_MAILBOX_AMAP {
+	struct BE_MCC_WRB_AMAP wrb;
+	struct BE_MCC_CQ_ENTRY_AMAP cq;
+} __packed;
+struct MCC_MAILBOX_AMAP {
+	u32 dw[68];
+};
+
+#endif /* __fwcmd_mcc_amap_h__ */

+ 244 - 0
drivers/staging/benet/fwcmd_opcodes.h

@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_opcodes_amap_h__
+#define __fwcmd_opcodes_amap_h__
+
+/*
+ * --- FWCMD_SUBSYSTEMS ---
+ * The commands are grouped into the following subsystems. The subsystem
+ * code, along with the opcode, uniquely identifies a particular fwcmd.
+ */
+#define FWCMD_SUBSYSTEM_RSVD  (0)	/* This subsystem is reserved. It is */
+						  /* never used. */
+#define FWCMD_SUBSYSTEM_COMMON (1)	/* CMDs in this group are common to
+					* all subsystems. See
+					* COMMON_SUBSYSTEM_OPCODES for opcodes
+					* and Common Host Configuration CMDs
+					* for the FWCMD descriptions.
+					*/
+#define FWCMD_SUBSYSTEM_COMMON_ISCSI    (2) /* CMDs in this group are */
+					/*
+					* common to Initiator and Target. See
+					* COMMON_ISCSI_SUBSYSTEM_OPCODES and
+					* Common iSCSI Initiator and Target
+					* CMDs for the command descriptions.
+					*/
+#define FWCMD_SUBSYSTEM_ETH             (3)	/* This subsystem is used to
+						execute  Ethernet commands.  */
+
+#define FWCMD_SUBSYSTEM_TPM             (4)	/* This subsystem is used
+						 to execute TPM  commands.  */
+#define FWCMD_SUBSYSTEM_PXE_UNDI        (5)	/* This subsystem is used
+						* to execute PXE
+						* and UNDI specific commands.
+						*/
+
+#define FWCMD_SUBSYSTEM_ISCSI_INI       (6)	/* This subsystem is used to
+						execute iSCSI Initiator
+						specific commands.
+						*/
+#define FWCMD_SUBSYSTEM_ISCSI_TGT       (7)	/* This subsystem is used
+						to execute iSCSI Target
+						specific commands between
+						PTL and ARM firmware.
+						*/
+#define FWCMD_SUBSYSTEM_MILI_PTL        (8)	/* This subsystem is used to
+						execute iSCSI Target specific
+						commands between MILI
+						and PTL.  */
+#define FWCMD_SUBSYSTEM_MILI_TMD        (9)	/* This subsystem is used to
+						execute iSCSI Target specific
+						commands between MILI
+						and TMD.  */
+#define FWCMD_SUBSYSTEM_PROXY           (11)	/* This subsystem is used
+						to execute proxied commands
+						within the host at the
+						explicit request of a
+						non-privileged domain.
+						This 'subsystem' is entirely
+						virtual from the controller
+						and firmware perspective as
+						it is implemented in host
+						drivers.
+						*/
+
+/*
+ * --- COMMON_SUBSYSTEM_OPCODES ---
+ * These opcodes are common to both networking and storage PCI
+ * functions. They are used to reserve resources and configure
+ * BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
+ * subsystem code.
+ */
+#define OPCODE_COMMON_NTWK_MAC_QUERY    (1)
+#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
+#define SUBSYSTEM_COMMON_NTWK_MAC_SET   (1)
+#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
+#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
+#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
+#define SUBSYSTEM_COMMON_READ_FLASHROM  (1)
+#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
+#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
+#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
+#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
+#define SUBSYSTEM_COMMON_RING_DESTROY   (1)
+#define SUBSYSTEM_COMMON_CQ_CREATE      (1)
+#define SUBSYSTEM_COMMON_EQ_CREATE      (1)
+#define SUBSYSTEM_COMMON_ETH_RX_CREATE  (1)
+#define SUBSYSTEM_COMMON_ETH_TX_CREATE  (1)
+#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
+#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
+#define SUBSYSTEM_COMMON_MCC_CREATE     (1)
+#define SUBSYSTEM_COMMON_JELL_CONFIG    (1)
+#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
+#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
+#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
+#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
+#define SUBSYSTEM_COMMON_GET_QOS        (1)
+#define SUBSYSTEM_COMMON_SET_QOS        (1)
+#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
+#define SUBSYSTEM_COMMON_SEEPROM_READ   (1)
+#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
+#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
+#define SUBSYSTEM_COMMON_NOP            (1)
+#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
+#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
+#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
+#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
+#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
+#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
+#define SUBSYSTEM_COMMON_GET_FAT        (1)
+#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
+#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
+#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
+#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
+#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
+#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
+#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
+#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
+#define SUBSYSTEM_COMMON_RED_CONFIG     (1)
+#define OPCODE_COMMON_NTWK_MAC_SET      (2)
+#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
+#define OPCODE_COMMON_NTWK_VLAN_CONFIG  (4)
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
+#define OPCODE_COMMON_READ_FLASHROM     (6)
+#define OPCODE_COMMON_WRITE_FLASHROM    (7)
+#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
+#define OPCODE_COMMON_ADD_PAGE_TABLES   (9)
+#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
+#define OPCODE_COMMON_RING_DESTROY      (11)
+#define OPCODE_COMMON_CQ_CREATE         (12)
+#define OPCODE_COMMON_EQ_CREATE         (13)
+#define OPCODE_COMMON_ETH_RX_CREATE     (14)
+#define OPCODE_COMMON_ETH_TX_CREATE     (15)
+#define OPCODE_COMMON_NET_RESERVED0     (16)	/* Reserved */
+#define OPCODE_COMMON_NET_RESERVED1     (17)	/* Reserved */
+#define OPCODE_COMMON_NET_RESERVED2     (18)	/* Reserved */
+#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
+#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
+#define OPCODE_COMMON_MCC_CREATE        (21)
+#define OPCODE_COMMON_JELL_CONFIG       (22)
+#define OPCODE_COMMON_FORCE_FAILOVER    (23)
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
+#define OPCODE_COMMON_POST_ZERO_BUFFER  (26)
+#define OPCODE_COMMON_GET_QOS           (27)
+#define OPCODE_COMMON_SET_QOS           (28)
+#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
+#define OPCODE_COMMON_SEEPROM_READ      (30)
+#define OPCODE_COMMON_TCP_STATE_QUERY   (31)
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
+#define OPCODE_COMMON_NOP               (33)
+#define OPCODE_COMMON_NTWK_RX_FILTER    (34)
+#define OPCODE_COMMON_GET_FW_VERSION    (35)
+#define OPCODE_COMMON_SET_FLOW_CONTROL  (36)
+#define OPCODE_COMMON_GET_FLOW_CONTROL  (37)
+#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
+#define OPCODE_COMMON_SET_FRAME_SIZE    (39)
+#define OPCODE_COMMON_GET_FAT           (40)
+#define OPCODE_COMMON_MODIFY_EQ_DELAY   (41)
+#define OPCODE_COMMON_FIRMWARE_CONFIG   (42)
+#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
+#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
+#define OPCODE_COMMON_SET_VLD_CONFIG    (45)
+#define OPCODE_COMMON_GET_VLD_CONFIG    (46)
+#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
+#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
+#define OPCODE_COMMON_RED_CONFIG        (49)
+
+
+
+/*
+ * --- ETH_SUBSYSTEM_OPCODES ---
+ * These opcodes are used for configuring the Ethernet interfaces. These
+ * opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
+ */
+#define OPCODE_ETH_RSS_CONFIG           (1)
+#define OPCODE_ETH_ACPI_CONFIG          (2)
+#define SUBSYSTEM_ETH_RSS_CONFIG        (3)
+#define SUBSYSTEM_ETH_ACPI_CONFIG       (3)
+#define OPCODE_ETH_PROMISCUOUS          (3)
+#define SUBSYSTEM_ETH_PROMISCUOUS       (3)
+#define SUBSYSTEM_ETH_GET_STATISTICS    (3)
+#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE  (3)
+#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE  (3)
+#define OPCODE_ETH_GET_STATISTICS       (4)
+#define OPCODE_ETH_GET_RX_FRAG_SIZE     (5)
+#define OPCODE_ETH_SET_RX_FRAG_SIZE     (6)
+
+
+
+
+
+/*
+ * --- MCC_STATUS_CODE ---
+ * These are the global status codes used by all subsystems
+ */
+#define MCC_STATUS_SUCCESS              (0)	/* Indicates a successful
+						completion of  the command */
+#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1)	/* The client does not have
+						sufficient privileges to
+						execute the command */
+#define MCC_STATUS_INVALID_PARAMETER    (2)	/* A parameter in the command
+						was invalid. The extended
+						status contains the index
+						of the parameter */
+#define MCC_STATUS_INSUFFICIENT_RESOURCES (3)	/* There are insufficient
+						chip resources to execute
+						the command */
+#define MCC_STATUS_QUEUE_FLUSHING       (4)	/* The command is completing
+						because the queue was
+						getting flushed */
+#define MCC_STATUS_DMA_FAILED           (5)	/* The command is completing
+						with a DMA error */
+
+/*
+ * --- MGMT_ERROR_CODES ---
+ * Error Codes returned in the status field of the FWCMD response header
+ */
+#define MGMT_STATUS_SUCCESS             (0)	/* The FWCMD completed
+						without errors */
+#define MGMT_STATUS_FAILED              (1)	/* Error status in the Status
+						field of  the
+						struct FWCMD_RESPONSE_HEADER */
+#define MGMT_STATUS_ILLEGAL_REQUEST     (2)	/* Invalid FWCMD opcode */
+#define MGMT_STATUS_ILLEGAL_FIELD       (3)	/* Invalid parameter in
+						the FWCMD  payload */
+
+#endif /* __fwcmd_opcodes_amap_h__ */
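
The completion and FWCMD status codes above surface in the MCC completion
path (see mpu.c below). Purely as an illustration, a helper mapping the
MCC_STATUS_* values to strings for trace output might look like this; it is
not part of the driver.

	static const char *example_mcc_status_str(u32 status)
	{
		switch (status) {
		case MCC_STATUS_SUCCESS:
			return "success";
		case MCC_STATUS_INSUFFICIENT_PRIVILEGES:
			return "insufficient privileges";
		case MCC_STATUS_INVALID_PARAMETER:
			return "invalid parameter";
		case MCC_STATUS_INSUFFICIENT_RESOURCES:
			return "insufficient resources";
		case MCC_STATUS_QUEUE_FLUSHING:
			return "queue flushing";
		case MCC_STATUS_DMA_FAILED:
			return "DMA failed";
		default:
			return "unknown";
		}
	}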

+ 29 - 0
drivers/staging/benet/fwcmd_types_bmap.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_types_bmap_h__
+#define __fwcmd_types_bmap_h__
+
+/* MAC address format  */
+struct MAC_ADDRESS_FORMAT {
+	u16 SizeOfStructure;
+	u8 MACAddress[6];
+} __packed;
+
+#endif /* __fwcmd_types_bmap_h__ */

+ 182 - 0
drivers/staging/benet/host_struct.h

@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __host_struct_amap_h__
+#define __host_struct_amap_h__
+#include "be_cm.h"
+#include "be_common.h"
+#include "descriptors.h"
+
+/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
+#define EQ_MAJOR_CODE_COMPLETION        (0)	/* Completion event on a */
+						  /* completion queue. */
+#define EQ_MAJOR_CODE_ETH               (1)	/* Affiliated Ethernet Event. */
+#define EQ_MAJOR_CODE_RESERVED          (2)	/* Reserved */
+#define EQ_MAJOR_CODE_RDMA              (3)	/* Affiliated RDMA Event. */
+#define EQ_MAJOR_CODE_ISCSI             (4)	/* Affiliated ISCSI Event */
+#define EQ_MAJOR_CODE_UNAFFILIATED      (5)	/* Unaffiliated Event */
+
+/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
+#define EQ_MINOR_CODE_COMPLETION        (0)	/* Completion event on a */
+						  /* completion queue. */
+#define EQ_MINOR_CODE_OTHER             (1)	/* Other Event (TBD). */
+
+/* Queue Entry Definition for all 4 byte event queue types. */
+struct BE_EQ_ENTRY_AMAP {
+	u8 Valid;		/* DWORD 0 */
+	u8 MajorCode[3];	/* DWORD 0 */
+	u8 MinorCode[12];	/* DWORD 0 */
+	u8 ResourceID[16];	/* DWORD 0 */
+} __packed;
+struct EQ_ENTRY_AMAP {
+	u32 dw[1];
+};
+
+/*
+ * --- ETH_EVENT_CODE ---
+ * These codes are returned by the MPU when one of these events has
+ * occurred and the event is configured to report to an Event Queue.
+ */
+#define ETH_EQ_LINK_STATUS              (0)	/* Link status change event */
+						  /* detected. */
+#define ETH_EQ_WATERMARK                (1)	/* watermark event detected. */
+#define ETH_EQ_MAGIC_PKT                (2)	/* magic pkt event detected. */
+#define ETH_EQ_ACPI_PKT0                (3)	/* ACPI interesting packet */
+						  /* detected. */
+#define ETH_EQ_ACPI_PKT1                (3)	/* ACPI interesting packet */
+						  /* detected. */
+#define ETH_EQ_ACPI_PKT2                (3)	/* ACPI interesting packet */
+						  /* detected. */
+#define ETH_EQ_ACPI_PKT3                (3)	/* ACPI interesting packet */
+						  /* detected. */
+
+/*
+ * --- ETH_TX_COMPL_STATUS_ENUM ---
+ * Status codes contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_VALID                  (0)
+#define ETH_COMP_ERROR                  (1)
+#define ETH_COMP_INVALID                (15)
+
+/*
+ * --- ETH_TX_COMPL_PORT_ENUM ---
+ * Port indicator contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_PORT0                  (0)
+#define ETH_COMP_PORT1                  (1)
+#define ETH_COMP_MGMT                   (2)
+
+/*
+ * --- ETH_TX_COMPL_CT_ENUM ---
+ * Completion type indicator contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_ETH                    (0)
+
+/*
+ * Work request block that the driver issues to the chip for
+ * Ethernet transmissions. All control fields must be valid in each WRB for
+ * a message. The controller, as specified by the flags, optionally writes
+ * an entry to the Completion Ring and generates an event.
+ */
+struct BE_ETH_WRB_AMAP {
+	u8 frag_pa_hi[32];	/* DWORD 0 */
+	u8 frag_pa_lo[32];	/* DWORD 1 */
+	u8 complete;	/* DWORD 2 */
+	u8 event;		/* DWORD 2 */
+	u8 crc;		/* DWORD 2 */
+	u8 forward;		/* DWORD 2 */
+	u8 ipsec;		/* DWORD 2 */
+	u8 mgmt;		/* DWORD 2 */
+	u8 ipcs;		/* DWORD 2 */
+	u8 udpcs;		/* DWORD 2 */
+	u8 tcpcs;		/* DWORD 2 */
+	u8 lso;		/* DWORD 2 */
+	u8 last;		/* DWORD 2 */
+	u8 vlan;		/* DWORD 2 */
+	u8 dbg[3];		/* DWORD 2 */
+	u8 hash_val[3];	/* DWORD 2 */
+	u8 lso_mss[14];	/* DWORD 2 */
+	u8 frag_len[16];	/* DWORD 3 */
+	u8 vlan_tag[16];	/* DWORD 3 */
+} __packed;
+struct ETH_WRB_AMAP {
+	u32 dw[4];
+};
+
+/* This is an Ethernet transmit completion descriptor */
+struct BE_ETH_TX_COMPL_AMAP {
+	u8 user_bytes[16];	/* DWORD 0 */
+	u8 nwh_bytes[8];	/* DWORD 0 */
+	u8 lso;		/* DWORD 0 */
+	u8 rsvd0[7];	/* DWORD 0 */
+	u8 wrb_index[16];	/* DWORD 1 */
+	u8 ct[2];		/* DWORD 1 */
+	u8 port[2];		/* DWORD 1 */
+	u8 rsvd1[8];	/* DWORD 1 */
+	u8 status[4];	/* DWORD 1 */
+	u8 rsvd2[16];	/* DWORD 2 */
+	u8 ringid[11];	/* DWORD 2 */
+	u8 hash_val[4];	/* DWORD 2 */
+	u8 valid;		/* DWORD 2 */
+	u8 rsvd3[32];	/* DWORD 3 */
+} __packed;
+struct ETH_TX_COMPL_AMAP {
+	u32 dw[4];
+};
+
+/* Ethernet Receive Buffer descriptor */
+struct BE_ETH_RX_D_AMAP {
+	u8 fragpa_hi[32];	/* DWORD 0 */
+	u8 fragpa_lo[32];	/* DWORD 1 */
+} __packed;
+struct ETH_RX_D_AMAP {
+	u32 dw[2];
+};
+
+/* This is an Ethernet Receive Completion Descriptor */
+struct BE_ETH_RX_COMPL_AMAP {
+	u8 vlan_tag[16];	/* DWORD 0 */
+	u8 pktsize[14];	/* DWORD 0 */
+	u8 port;		/* DWORD 0 */
+	u8 rsvd0;		/* DWORD 0 */
+	u8 err;		/* DWORD 1 */
+	u8 rsshp;		/* DWORD 1 */
+	u8 ipf;		/* DWORD 1 */
+	u8 tcpf;		/* DWORD 1 */
+	u8 udpf;		/* DWORD 1 */
+	u8 ipcksm;		/* DWORD 1 */
+	u8 tcpcksm;		/* DWORD 1 */
+	u8 udpcksm;		/* DWORD 1 */
+	u8 macdst[6];	/* DWORD 1 */
+	u8 vtp;		/* DWORD 1 */
+	u8 vtm;		/* DWORD 1 */
+	u8 fragndx[10];	/* DWORD 1 */
+	u8 ct[2];		/* DWORD 1 */
+	u8 ipsec;		/* DWORD 1 */
+	u8 numfrags[3];	/* DWORD 1 */
+	u8 rsvd1[31];	/* DWORD 2 */
+	u8 valid;		/* DWORD 2 */
+	u8 rsshash[32];	/* DWORD 3 */
+} __packed;
+struct ETH_RX_COMPL_AMAP {
+	u32 dw[4];
+};
+
+#endif /* __host_struct_amap_h__ */

+ 830 - 0
drivers/staging/benet/hwlib.h

@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef __hwlib_h__
+#define __hwlib_h__
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "regmap.h"		/* srcgen array map output */
+
+#include "asyncmesg.h"
+#include "fwcmd_opcodes.h"
+#include "post_codes.h"
+#include "fwcmd_mcc.h"
+
+#include "fwcmd_types_bmap.h"
+#include "fwcmd_common_bmap.h"
+#include "fwcmd_eth_bmap.h"
+#include "bestatus.h"
+/*
+ *
+ * Macros for reading/writing a protection domain or CSR registers
+ * in BladeEngine.
+ */
+#define PD_READ(fo, field)	ioread32((fo)->db_va + \
+		offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
+
+#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
+		offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
+
+#define CSR_READ(fo, field)	ioread32((fo)->csr_va + \
+		offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
+
+#define CSR_WRITE(fo, field, val)	iowrite32(val, (fo)->csr_va + \
+		offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
+
+#define PCICFG0_READ(fo, field)	ioread32((fo)->pci_va + \
+		offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
+
+#define PCICFG0_WRITE(fo, field, val)	iowrite32(val, (fo)->pci_va + \
+		offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
+
+#define PCICFG1_READ(fo, field)	ioread32((fo)->pci_va + \
+		offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
+
+#define PCICFG1_WRITE(fo, field, val)	iowrite32(val, (fo)->pci_va + \
+		offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
+
+#ifdef BE_DEBUG
+#define ASSERT(c)       BUG_ON(!(c));
+#else
+#define ASSERT(c)
+#endif
+
+/* debug levels */
+enum BE_DEBUG_LEVELS {
+	DL_ALWAYS = 0,		/* cannot be masked */
+	DL_ERR = 0x1,		/* errors that should never happen */
+	DL_WARN = 0x2,		/* something questionable;
+				   recoverable errors */
+	DL_NOTE = 0x4,		/* infrequent, important debug info */
+	DL_INFO = 0x8,		/* debug information */
+	DL_VERBOSE = 0x10,	/* detailed info, such as buffer traces */
+	BE_DL_MIN_VALUE = 0x1,	/* this is the min value used */
+	BE_DL_MAX_VALUE = 0x80	/* this is the highest value used */
+} ;
+
+extern unsigned int trace_level;
+
+#define TRACE(lm, fmt, args...)  {				\
+		if (trace_level & lm) {				\
+			printk(KERN_NOTICE "BE: %s:%d \n" fmt,	\
+			__FILE__ , __LINE__ , ## args);		\
+		}						\
+	}
+
+static inline unsigned int be_trace_set_level(unsigned int level)
+{
+	unsigned int old_level = trace_level;
+	trace_level = level;
+	return old_level;
+}
+
+#define be_trace_get_level() 	trace_level
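
A minimal usage sketch: be_trace_set_level() returns the previous mask, so a
caller can raise verbosity around a noisy section and restore it afterwards.

	static void example_trace_usage(void)
	{
		/* Enable warnings and notes in addition to errors. */
		unsigned int old = be_trace_set_level(DL_ERR | DL_WARN | DL_NOTE);

		TRACE(DL_NOTE, "mailbox ready after %d polls", 3);

		be_trace_set_level(old);	/* restore the previous mask */
	}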
+/*
+ * Returns number of pages spanned by the size of data
+ * starting at the given address.
+ */
+#define PAGES_SPANNED(_address, _size) \
+   ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
+		(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
+/* Byte offset into the page corresponding to given address */
+#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
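
A worked example of the page-spanning arithmetic, assuming 4kB pages: 8kB of
data starting 0x100 bytes into a page touches three pages, while the same
buffer page-aligned touches exactly two.

	/*
	 * PAGES_SPANNED(0x100, 0x2000) = (0x100 + 0x2000 + 0xFFF) >> 12 = 3
	 * PAGES_SPANNED(0x000, 0x2000) = (0x000 + 0x2000 + 0xFFF) >> 12 = 2
	 */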
+
+/*
+ * circular subtract.
+ * Returns a - b assuming a circular number system, where a and b are
+ * in the range [0, max-1]. If a == b, zero is returned, so the
+ * highest value possible with this subtraction is max-1.
+ */
+static inline u32 be_subc(u32 a, u32 b, u32 max)
+{
+	ASSERT(a <= max && b <= max);
+	ASSERT(max > 0);
+	return a >= b ? (a - b) : (max - b + a);
+}
+
+static inline u32 be_addc(u32 a, u32 b, u32 max)
+{
+	ASSERT(a < max);
+	ASSERT(max > 0);
+	return (max - a > b) ? (a + b) : (b + a - max);
+}
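
A quick self-check of the wrap behavior on a hypothetical 4-entry ring
(illustrative only, not part of the driver):

	static void example_ring_index_math(void)
	{
		ASSERT(be_addc(3, 1, 4) == 0); /* producer wraps from last slot */
		ASSERT(be_subc(0, 3, 4) == 1); /* one item pending across wrap */
		ASSERT(be_subc(2, 2, 4) == 0); /* a == b means the ring is empty */
	}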
+
+/* descriptor for a physically contiguous memory used for ring */
+struct ring_desc {
+	u32 length;	/* length in bytes */
+	void *va; 	/* virtual address */
+	u64 pa;		/* bus address */
+} ;
+
+/*
+ * This structure stores information about a ring shared between hardware
+ * and software.  Each ring is allocated by the driver in the uncached
+ * extension and mapped into BladeEngine's unified table.
+ */
+struct mp_ring {
+	u32 pages;		/* queue size in pages */
+	u32 id;			/* queue id assigned by beklib */
+	u32 num;		/* number of elements in queue */
+	u32 cidx;		/* consumer index */
+	u32 pidx;		/* producer index -- not used by most rings */
+	u32 itemSize;		/* size in bytes of one object */
+
+	void *va;		/* The virtual address of the ring.
+				   This should be last to allow 32 & 64
+				   bit debugger extensions to work. */
+} ;
+
+/*-----------  amap bit field get / set macros and functions -----*/
+/*
+ * Structures defined in the map header files (under fw/amap/) with names
+ * in the format BE_<name>_AMAP are pseudo structures with members
+ * of type u8. These structures are templates that are used in
+ * conjunction with the structures with names in the format
+ * <name>_AMAP to calculate the bit masks and bit offsets to get or set
+ * bit fields in structures. The structures <name>_AMAP are arrays
+ * of 32-bit words and have the correct size. The following macros
+ * provide convenient ways to get and set the various members
+ * in the structures without using structures with bit fields.
+ * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR macros
+ * to extract and set the various members.
+ */
+
+/*
+ * Returns a bit mask for the register that is NOT shifted into location.
+ * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
+ */
+static inline u32 amap_mask(u32 bit_size)
+{
+	return bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1;
+}
+
+#define AMAP_BIT_MASK(_struct_, field)       \
+	amap_mask(AMAP_BIT_SIZE(_struct_, field))
+
+/*
+ * non-optimized set bits function. First clears the bits and then assigns them.
+ * This does not require knowledge of the particular DWORD you are setting.
+ * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
+ */
+static inline void
+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
+{
+	u32 *dw = (u32 *)ptr;
+	*(dw + dw_offset) &= ~(mask << offset);
+	*(dw + dw_offset) |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val)	\
+	amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
+		AMAP_BIT_MASK(_struct_, field),			\
+		AMAP_BIT_OFFSET(_struct_, field), val)
+/*
+ * Non-optimized routine that gets the bits without knowing the correct DWORD.
+ * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
+ */
+static inline u32
+amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+	u32 *dw = (u32 *)ptr;
+	return mask & (*(dw + dw_offset) >> offset);
+}
+#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_)			\
+	amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),	\
+		AMAP_BIT_MASK(_struct_, field),				\
+		AMAP_BIT_OFFSET(_struct_, field))
+
+/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
+#define AMAP_BIT_OFFSET(_struct_, field)                  \
+	(offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
+
+/* Returns 0-n representing DWORD offset of bitfield within the structure. */
+#define AMAP_WORD_OFFSET(_struct_, field)  \
+		  (offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
+
+/* Returns size of bitfield in bits. */
+#define AMAP_BIT_SIZE(_struct_, field) \
+		sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
+
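
A minimal sketch of the accessors in use, decoding the one-DWORD event queue
entry defined in host_struct.h (assumed visible here):

	static void example_amap_usage(struct EQ_ENTRY_AMAP *eqe)
	{
		u32 valid, major, resource_id;

		valid = AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqe);
		major = AMAP_GET_BITS_PTR(EQ_ENTRY, MajorCode, eqe);
		resource_id = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqe);

		if (valid && major == EQ_MAJOR_CODE_COMPLETION)
			TRACE(DL_INFO, "completion event, resource 0x%x",
								resource_id);

		/* Clearing the valid bit returns the entry to the hardware. */
		AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqe, 0);
	}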
+struct be_mcc_wrb_response_copy {
+	u16 length;		/* bytes in response */
+	u16 fwcmd_offset;	/* offset within the wrb of the response */
+	void *va;		/* user's va to copy response into */
+
+} ;
+typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
+				struct MCC_WRB_AMAP *optional_wrb);
+struct be_mcc_wrb_context {
+
+	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
+						completion */
+	void *internal_cb_context;	/* Parameter to pass
+						   to completion function */
+
+	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
+	void *cb_context;	/* Parameter to pass to completion function */
+
+	int *users_final_status;	/* pointer to a local
+						variable for synchronous
+						commands */
+	struct MCC_WRB_AMAP *wrb;	/* pointer to original wrb for embedded
+						commands only */
+	struct list_head next;	/* links context structs together in
+				   free list */
+
+	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
+					   embedded response to user's va */
+
+#if defined(BE_DEBUG)
+	u16 subsystem, opcode;	/* Track this FWCMD for debug builds. */
+	struct MCC_WRB_AMAP *ring_wrb;
+	u32 consumed_count;
+#endif
+} ;
+
+/*
+    Represents a function object for network or storage.  This
+    is used to manage per-function resources like MCC CQs, etc.
+*/
+struct be_function_object {
+
+	u32 magic;		/*!< magic for detecting memory corruption. */
+
+	/* PCI BAR mapped addresses */
+	u8 __iomem *csr_va;	/* CSR */
+	u8 __iomem *db_va;	/* Door Bell */
+	u8 __iomem *pci_va;	/* PCI config space */
+	u32 emulate;		/* if set, MPU is not available.
+				  Emulate everything.     */
+	u32 pend_queue_driving;	/* if set, drive the queued WRBs
+				   after releasing the WRB lock */
+
+	spinlock_t post_lock;	/* lock for verifying one thread posting wrbs */
+	spinlock_t cq_lock;	/* lock for verifying one thread
+				   processing cq */
+	spinlock_t mcc_context_lock;	/* lock for protecting mcc
+					   context free list */
+	unsigned long post_irq;
+	unsigned long cq_irq;
+
+	u32 type;
+	u32 pci_function_number;
+
+	struct be_mcc_object *mcc;	/* mcc rings. */
+
+	struct {
+		struct MCC_MAILBOX_AMAP *va;	/* VA to the mailbox */
+		u64 pa;	/* PA to the mailbox */
+		u32 length;	/* byte length of mailbox */
+
+		/* One default context struct used for posting at
+		 * least one MCC_WRB
+		 */
+		struct be_mcc_wrb_context default_context;
+		bool default_context_allocated;
+	} mailbox;
+
+	struct {
+
+		/* Wake-on-LAN configuration. */
+		u32 wol_bitmask;	/* bits 0,1,2,3 are set if
+					   corresponding index is enabled */
+	} config;
+
+
+	struct BE_FIRMWARE_CONFIG fw_config;
+} ;
+
+/*
+      Represents an Event Queue
+*/
+struct be_eq_object {
+	u32 magic;
+	atomic_t ref_count;
+
+	struct be_function_object *parent_function;
+
+	struct list_head eq_list;
+	struct list_head cq_list_head;
+
+	u32 eq_id;
+	void *cb_context;
+
+} ;
+
+/*
+    Manages a completion queue
+*/
+struct be_cq_object {
+	u32 magic;
+	atomic_t ref_count;
+
+	struct be_function_object *parent_function;
+	struct be_eq_object *eq_object;
+
+	struct list_head cq_list;
+	struct list_head cqlist_for_eq;
+
+	void *va;
+	u32 num_entries;
+
+	void *cb_context;
+
+	u32 cq_id;
+
+} ;
+
+/*
+    Manages an ethernet send queue
+*/
+struct be_ethsq_object {
+	u32 magic;
+
+	struct list_head list;
+
+	struct be_function_object *parent_function;
+	struct be_cq_object *cq_object;
+	u32 bid;
+
+} ;
+
+/*
+    Manages an ethernet receive queue
+*/
+struct be_ethrq_object {
+	u32 magic;
+	struct list_head list;
+	struct be_function_object *parent_function;
+	u32 rid;
+	struct be_cq_object *cq_object;
+	struct be_cq_object *rss_cq_object[4];
+
+} ;
+
+/*
+    Manages an MCC
+*/
+typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
+				void *event);
+struct be_mcc_object {
+	u32 magic;
+
+	struct be_function_object *parent_function;
+	struct list_head mcc_list;
+
+	struct be_cq_object *cq_object;
+
+	/* Async event callback for MCC CQ. */
+	mcc_async_event_callback async_cb;
+	void *async_context;
+
+	struct {
+		struct be_mcc_wrb_context *base;
+		u32 num;
+		struct list_head list_head;
+	} wrb_context;
+
+	struct {
+		struct ring_desc *rd;
+		struct mp_ring ring;
+	} sq;
+
+	struct {
+		struct mp_ring ring;
+	} cq;
+
+	u32 processing;		/* flag indicating that one thread
+				   is processing CQ */
+	u32 rearm;		/* doorbell rearm setting to make sure
+				   the active processing thread rearms
+				   the CQ if any of the threads
+				   requested it. */
+
+	struct list_head backlog;
+	u32 backlog_length;
+	u32 driving_backlog;
+	u32 consumed_index;
+
+} ;
+
+
+/* Queue context header -- the required software information for
+ * queueing a WRB.
+ */
+struct be_queue_driver_context {
+	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
+						   completion */
+	void *internal_cb_context;	/* Parameter to pass
+						   to completion function */
+
+	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
+	void *cb_context;	/* Parameter to pass to completion function */
+
+	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
+					   embedded response to user's va */
+	void *optional_fwcmd_va;
+	struct list_head list;
+	u32 bytes;
+} ;
+
+/*
+ * Common MCC WRB header that all commands require.
+ */
+struct be_mcc_wrb_header {
+	u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
+} ;
+
+/*
+ * All non embedded commands supported by hwlib functions only allow
+ * 1 SGE.  This queue context handles them all.
+ */
+struct be_nonembedded_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct MCC_SGE_AMAP sge[1];
+} ;
+
+/*
+ * ------------------------------------------------------------------------
+ *  This section contains the specific queue struct for each command.
+ *  The user could always provide a be_generic_q_ctxt but this is a
+ *  rather large struct.  By using the specific struct, memory consumption
+ *  can be reduced.
+ * ------------------------------------------------------------------------
+ */
+
+struct be_link_status_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
+} ;
+
+struct be_multicast_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
+} ;
+
+
+struct be_vlan_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
+} ;
+
+struct be_promiscuous_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_ETH_PROMISCUOUS fwcmd;
+} ;
+
+struct be_force_failover_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
+} ;
+
+
+struct be_rxf_filter_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
+} ;
+
+struct be_eq_modify_delay_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
+} ;
+
+/*
+ * The generic context is the largest size that would be required.
+ * It is the software context plus an entire WRB.
+ */
+struct be_generic_q_ctxt {
+	struct be_queue_driver_context context;
+	struct be_mcc_wrb_header wrb_header;
+	struct MCC_WRB_PAYLOAD_AMAP payload;
+} ;
+
+/*
+ * Types for the BE_QUEUE_CONTEXT object.
+ */
+#define BE_QUEUE_INVALID	(0)
+#define BE_QUEUE_LINK_STATUS	(0xA006)
+#define BE_QUEUE_ETH_STATS	(0xA007)
+#define BE_QUEUE_TPM_STATS	(0xA008)
+#define BE_QUEUE_TCP_STATS	(0xA009)
+#define BE_QUEUE_MULTICAST	(0xA00A)
+#define BE_QUEUE_VLAN		(0xA00B)
+#define BE_QUEUE_RSS		(0xA00C)
+#define BE_QUEUE_FORCE_FAILOVER	(0xA00D)
+#define BE_QUEUE_PROMISCUOUS	(0xA00E)
+#define BE_QUEUE_WAKE_ON_LAN	(0xA00F)
+#define BE_QUEUE_NOP		(0xA010)
+
+/* --- BE_FUNCTION_ENUM --- */
+#define BE_FUNCTION_TYPE_ISCSI          (0)
+#define BE_FUNCTION_TYPE_NETWORK        (1)
+#define BE_FUNCTION_TYPE_ARM            (2)
+
+/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
+#define BE_ETH_TX_RING_TYPE_FORWARDING  (1) 	/* Ether ring for forwarding */
+#define BE_ETH_TX_RING_TYPE_STANDARD    (2)	/* Ether ring for sending */
+						/* network packets. */
+#define BE_ETH_TX_RING_TYPE_BOUND       (3)	/* Ethernet ring for sending */
+						/* network packets, bound */
+						/* to a physical port. */
+/*
+ * ----------------------------------------------------------------------
+ *   API MACROS
+ * ----------------------------------------------------------------------
+ */
+#define BE_FWCMD_NAME(_short_name_)     struct FWCMD_##_short_name_
+#define BE_OPCODE_NAME(_short_name_)    OPCODE_##_short_name_
+#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
+
+
+#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_)	\
+	((BE_FWCMD_NAME(_short_name_) *)				\
+	be_function_prepare_embedded_fwcmd(_pfob_, _wrb_,	\
+		sizeof(BE_FWCMD_NAME(_short_name_)),		\
+		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
+		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
+		BE_OPCODE_NAME(_short_name_),				\
+		BE_SUBSYSTEM_NAME(_short_name_)));
+
+#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
+	((BE_FWCMD_NAME(_short_name_) *)				\
+	be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
+		sizeof(BE_FWCMD_NAME(_short_name_)),		\
+		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
+		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
+		BE_OPCODE_NAME(_short_name_),				\
+		BE_SUBSYSTEM_NAME(_short_name_)));
+
+int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
+	u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
+	  struct be_function_object *pfob);
+
+int be_function_object_destroy(struct be_function_object *pfob);
+int be_function_cleanup(struct be_function_object *pfob);
+
+
+int be_function_get_fw_version(struct be_function_object *pfob,
+	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
+	mcc_wrb_cqe_callback cb, void *cb_context);
+
+
+int be_eq_modify_delay(struct be_function_object *pfob,
+		   u32 num_eq, struct be_eq_object **eq_array,
+		   u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
+		   void *cb_context,
+		   struct be_eq_modify_delay_q_ctxt *q_ctxt);
+
+
+
+int be_eq_create(struct be_function_object *pfob,
+	     struct ring_desc *rd, u32 eqe_size, u32 num_entries,
+	     u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
+
+int be_eq_destroy(struct be_eq_object *eq);
+
+int be_cq_create(struct be_function_object *pfob,
+	struct ring_desc *rd, u32 length,
+	bool solicited_eventable, bool no_delay,
+	u32 wm_thresh, struct be_eq_object *eq_object,
+	struct be_cq_object *cq_object);
+
+int be_cq_destroy(struct be_cq_object *cq);
+
+int be_mcc_ring_create(struct be_function_object *pfob,
+		   struct ring_desc *rd, u32 length,
+		   struct be_mcc_wrb_context *context_array,
+		   u32 num_context_entries,
+		   struct be_cq_object *cq, struct be_mcc_object *mcc);
+int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
+
+int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
+
+int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
+		mcc_async_event_callback cb, void *cb_context);
+
+int be_pci_soft_reset(struct be_function_object *pfob);
+
+
+int be_drive_POST(struct be_function_object *pfob);
+
+
+int be_eth_sq_create(struct be_function_object *pfob,
+		struct ring_desc *rd, u32 length_in_bytes,
+		u32 type, u32 ulp, struct be_cq_object *cq_object,
+		struct be_ethsq_object *eth_sq);
+
+struct be_eth_sq_parameters {
+	u32 port;
+	u32 rsvd0[2];
+} ;
+
+int be_eth_sq_create_ex(struct be_function_object *pfob,
+		    struct ring_desc *rd, u32 length_in_bytes,
+		    u32 type, u32 ulp, struct be_cq_object *cq_object,
+		    struct be_eth_sq_parameters *ex_parameters,
+		    struct be_ethsq_object *eth_sq);
+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
+
+int be_eth_set_flow_control(struct be_function_object *pfob,
+			bool txfc_enable, bool rxfc_enable);
+
+int be_eth_get_flow_control(struct be_function_object *pfob,
+			bool *txfc_enable, bool *rxfc_enable);
+int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
+
+int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
+
+int be_eth_set_frame_size(struct be_function_object *pfob,
+		      u32 *tx_frame_size, u32 *rx_frame_size);
+
+int be_eth_rq_create(struct be_function_object *pfob,
+		 struct ring_desc *rd, struct be_cq_object *cq_object,
+		 struct be_cq_object *bcmc_cq_object,
+		 struct be_ethrq_object *eth_rq);
+
+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
+
+int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
+		mcc_wrb_cqe_callback cb, void *cb_context);
+int be_eth_rq_set_frag_size(struct be_function_object *pfob,
+		u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
+int be_eth_rq_get_frag_size(struct be_function_object *pfob,
+						u32 *frag_size_bytes);
+
+void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
+		   struct MCC_WRB_AMAP *wrb,
+		   u32 payload_length, u32 request_length,
+		   u32 response_length, u32 opcode, u32 subsystem);
+void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
+	struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
+	u32 payload_length, u32 request_length, u32 response_length,
+	u32 opcode, u32 subsystem);
+
+
+struct MCC_WRB_AMAP *
+be_function_peek_mcc_wrb(struct be_function_object *pfob);
+
+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
+	      bool port1, bool mac1, bool mgmt,
+	      bool write, bool permanent, u8 *mac_address,
+	      mcc_wrb_cqe_callback cb,
+	      void *cb_context);
+
+int be_rxf_multicast_config(struct be_function_object *pfob,
+			bool promiscuous, u32 num, u8 *mac_table,
+			mcc_wrb_cqe_callback cb,
+			void *cb_context,
+			struct be_multicast_q_ctxt *q_ctxt);
+
+int be_rxf_vlan_config(struct be_function_object *pfob,
+	   bool promiscuous, u32 num, u16 *vlan_tag_array,
+	   mcc_wrb_cqe_callback cb, void *cb_context,
+	   struct be_vlan_q_ctxt *q_ctxt);
+
+
+int be_rxf_link_status(struct be_function_object *pfob,
+		   struct BE_LINK_STATUS *link_status,
+		   mcc_wrb_cqe_callback cb,
+		   void *cb_context,
+		   struct be_link_status_q_ctxt *q_ctxt);
+
+
+int be_rxf_query_eth_statistics(struct be_function_object *pfob,
+		struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
+		u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
+		void *cb_context,
+		struct be_nonembedded_q_ctxt *q_ctxt);
+
+int be_rxf_promiscuous(struct be_function_object *pfob,
+		   bool enable_port0, bool enable_port1,
+		   mcc_wrb_cqe_callback cb, void *cb_context,
+		   struct be_promiscuous_q_ctxt *q_ctxt);
+
+
+int be_rxf_filter_config(struct be_function_object *pfob,
+		     struct NTWK_RX_FILTER_SETTINGS *settings,
+		     mcc_wrb_cqe_callback cb,
+		     void *cb_context,
+		     struct be_rxf_filter_q_ctxt *q_ctxt);
+
+/*
+ * ------------------------------------------------------
+ *  internal functions used by hwlib
+ * ------------------------------------------------------
+ */
+
+
+int be_function_ring_destroy(struct be_function_object *pfob,
+		       u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
+		       void *cb_context,
+		       mcc_wrb_cqe_callback internal_cb,
+		       void *internal_callback_context);
+
+int be_function_post_mcc_wrb(struct be_function_object *pfob,
+		struct MCC_WRB_AMAP *wrb,
+		struct be_generic_q_ctxt *q_ctxt,
+		mcc_wrb_cqe_callback cb, void *cb_context,
+		mcc_wrb_cqe_callback internal_cb,
+		void *internal_cb_context, void *optional_fwcmd_va,
+		struct be_mcc_wrb_response_copy *response_copy);
+
+int be_function_queue_mcc_wrb(struct be_function_object *pfob,
+			  struct be_generic_q_ctxt *q_ctxt);
+
+/*
+ * ------------------------------------------------------
+ *  MCC QUEUE
+ * ------------------------------------------------------
+ */
+
+int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
+
+
+struct MCC_WRB_AMAP *
+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
+
+struct be_mcc_wrb_context *
+_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
+
+void _be_mcc_free_wrb_context(struct be_function_object *pfob,
+			 struct be_mcc_wrb_context *context);
+
+int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
+	 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
+
+int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
+	struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
+
+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
+
+
+/*
+ * ------------------------------------------------------
+ *  Ring Sizes
+ * ------------------------------------------------------
+ */
+static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
+{
+
+	ASSERT(encoding != 1);	/* 1 is rsvd */
+	ASSERT(encoding < 16);
+	ASSERT(object_size > 0);
+
+	if (encoding == 0)	/* 32k deep */
+		encoding = 16;
+
+	return (1 << (encoding - 1)) * object_size;
+}
+
+static inline
+u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
+{
+
+	u32 count, encoding;
+
+	ASSERT(object_size > 0);
+	ASSERT(length_in_bytes % object_size == 0);
+
+	count = length_in_bytes / object_size;
+
+	ASSERT(count > 1);
+	ASSERT(count <= 32 * 1024);
+	ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */
+
+	encoding = __ilog2_u32(count) + 1;
+
+	if (encoding == 16)
+		encoding = 0;	/* 32k deep */
+
+	return encoding;
+}
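
The two helpers are inverses for power-of-two counts; a worked example for a
256-entry ring of 16-byte objects:

	/*
	 * be_ring_length_to_encoding(256 * 16, 16) == 9, since 2^(9-1) == 256
	 * be_ring_encoding_to_length(9, 16) == 256 * 16 == 4096 bytes
	 * Encoding 0 is the special case for the deepest (32k-entry) ring.
	 */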
+
+void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
+						u32 max_num);
+#endif /* __hwlib_h__ */

+ 1364 - 0
drivers/staging/benet/mpu.c

@@ -0,0 +1,1364 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/delay.h>
+#include "hwlib.h"
+#include "bestatus.h"
+
+static
+inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
+{
+	ASSERT(ring);
+	memset(ring, 0, sizeof(struct mp_ring));
+	ring->num = num;
+	ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
+	ring->itemSize = size;
+	ring->va = va;
+}
+
+/*
+ * -----------------------------------------------------------------------
+ * Interface for 2 index rings. i.e. consumer/producer rings
+ * --------------------------------------------------------------------------
+ */
+
+/* Returns the number of items pending on the ring. */
+static inline u32 mp_ring_num_pending(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	if (ring->num == 0)
+		return 0;
+	return be_subc(ring->pidx, ring->cidx, ring->num);
+}
+
+/* Returns the number of items free on the ring. */
+static inline u32 mp_ring_num_empty(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	return ring->num - 1 - mp_ring_num_pending(ring);
+}
+
+/* Consume 1 item */
+static inline void mp_ring_consume(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	ASSERT(ring->pidx != ring->cidx);
+
+	ring->cidx = be_addc(ring->cidx, 1, ring->num);
+}
+
+/* Produce 1 item */
+static inline void mp_ring_produce(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	ring->pidx = be_addc(ring->pidx, 1, ring->num);
+}
+
+/* Consume count items */
+static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
+{
+	ASSERT(ring);
+	ASSERT(mp_ring_num_pending(ring) >= count);
+	ring->cidx = be_addc(ring->cidx, count, ring->num);
+}
+
+static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
+{
+	ASSERT(ring);
+	ASSERT(index < ring->num);
+	ASSERT(ring->itemSize > 0);
+	return (u8 *) ring->va + index * ring->itemSize;
+}
+
+/* Ptr to produce item */
+static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	return mp_ring_item(ring, ring->pidx);
+}
+
+/*
+ * Returns a pointer to the current location in the ring.
+ * This is used for rings with 1 index.
+ */
+static inline void *mp_ring_current(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	ASSERT(ring->pidx == 0);	/* not used */
+
+	return mp_ring_item(ring, ring->cidx);
+}
+
+/*
+ * Increment index for rings with only 1 index.
+ * This is used for rings with 1 index.
+ */
+static inline void *mp_ring_next(struct mp_ring *ring)
+{
+	ASSERT(ring);
+	ASSERT(ring->num > 0);
+	ASSERT(ring->pidx == 0);	/* not used */
+
+	ring->cidx = be_addc(ring->cidx, 1, ring->num);
+	return mp_ring_current(ring);
+}
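
A minimal sketch of the two-index interface, assuming the caller has already
allocated the backing memory for the ring:

	static void example_mp_ring_usage(void *ring_mem)
	{
		struct mp_ring ring;
		void *slot;

		mp_ring_create(&ring, 64, 16, ring_mem); /* 64 x 16-byte items */

		if (mp_ring_num_empty(&ring) > 0) {
			slot = mp_ring_producer_ptr(&ring);
			memset(slot, 0, 16);	/* fill the item in place */
			mp_ring_produce(&ring);
		}

		while (mp_ring_num_pending(&ring) > 0)
			mp_ring_consume(&ring);	/* drain everything posted */
	}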
+
+/*
+    This routine waits for a previously posted mailbox WRB to be completed.
+    Specifically it waits for the mailbox to say that it's ready to accept
+    more data by setting the LSB of the mailbox pd register to 1.
+
+    pcontroller      - The function object to post this data to
+
+    IRQL < DISPATCH_LEVEL
+*/
+static void be_mcc_mailbox_wait(struct be_function_object *pfob)
+{
+	struct MPU_MAILBOX_DB_AMAP mailbox_db;
+	u32 i = 0;
+	u32 ready;
+
+	if (pfob->emulate) {
+		/* No waiting for mailbox in emulated mode. */
+		return;
+	}
+
+	mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
+	ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
+
+	while (ready == false) {
+		if ((++i & 0x3FFFF) == 0) {
+			TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
+								i / 1000);
+		}
+		udelay(1);
+		mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
+		ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
+	}
+}
+
+/*
+    This routine tells the MCC mailbox that there is data to be processed
+    in the mailbox. It does this by setting the physical address for the
+    mailbox location and clearing the LSB.  This routine returns immediately
+    and does not wait for the WRB to be processed.
+
+    pcontroller      - The function object to post this data to
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+static void be_mcc_mailbox_notify(struct be_function_object *pfob)
+{
+	struct MPU_MAILBOX_DB_AMAP mailbox_db;
+	u32 pa;
+
+	ASSERT(pfob->mailbox.pa);
+	ASSERT(pfob->mailbox.va);
+
+	/* If emulated, do not ring the mailbox */
+	if (pfob->emulate) {
+		TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
+		return;
+	}
+
+	/* form the higher bits in the address */
+	mailbox_db.dw[0] = 0;	/* init */
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
+
+	/* bits 34 to 63 */
+	pa = (u32) (pfob->mailbox.pa >> 34);
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
+
+	/* Wait for the MPU to be ready */
+	be_mcc_mailbox_wait(pfob);
+
+	/* Ring doorbell 1st time */
+	PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
+
+	/* Wait for 1st write to be acknowledged. */
+	be_mcc_mailbox_wait(pfob);
+
+	/* lower 30 bits of the address (bits 4 to 33) */
+	pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;
+
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
+	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
+
+	/* Ring doorbell 2nd time */
+	PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
+}
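
The mailbox address is thus delivered in two doorbell writes: bits 34..63
first (with hi set), then bits 4..33; bits 0..3 are implied zero, so the
mailbox must be at least 16-byte aligned. A sanity sketch of the split
(illustrative only):

	static void example_check_mailbox_pa_split(u64 pa)
	{
		u32 hi = (u32) (pa >> 34);		/* first doorbell write */
		u32 lo = (u32) (pa >> 4) & 0x3FFFFFFF;	/* second doorbell write */

		/* Reassembles to the original address, minus the low 4 bits. */
		ASSERT((((u64) hi << 34) | ((u64) lo << 4)) == (pa & ~0xFULL));
	}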
+
+/*
+    This routine tells the MCC mailbox that there is data to be processed
+    in the mailbox. It does this by setting the physical address for the
+    mailbox location and clearing the LSB.  This routine spins until the
+    MPU writes a 1 into the LSB indicating that the data has been received
+    and is ready to be processed.
+
+    pcontroller      - The function object to post this data to
+
+    IRQL < DISPATCH_LEVEL
+*/
+static void
+be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
+{
+	/*
+	 * Notify it
+	 */
+	be_mcc_mailbox_notify(pfob);
+	/*
+	 * Now wait for completion of WRB
+	 */
+	be_mcc_mailbox_wait(pfob);
+}
+
+void
+be_mcc_process_cqe(struct be_function_object *pfob,
+				struct MCC_CQ_ENTRY_AMAP *cqe)
+{
+	struct be_mcc_wrb_context *wrb_context = NULL;
+	u32 offset, status;
+	u8 *p;
+
+	ASSERT(cqe);
+	/*
+	 * A command completed.  Commands complete out-of-order.
+	 * Determine which command completed from the TAG.
+	 */
+	offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
+	p = (u8 *) cqe + offset;
+	wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
+	ASSERT(wrb_context);
+
+	/*
+	 * Perform a response copy if requested.
+	 * Only copy data if the FWCMD is successful.
+	 */
+	status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
+	if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
+		ASSERT(wrb_context->wrb);
+		ASSERT(wrb_context->copy.va);
+		p = (u8 *)wrb_context->wrb +
+				offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
+		memcpy(wrb_context->copy.va,
+			  (u8 *)p + wrb_context->copy.fwcmd_offset,
+			  wrb_context->copy.length);
+	}
+
+	if (status)
+		status = BE_NOT_OK;
+	/* internal callback */
+	if (wrb_context->internal_cb) {
+		wrb_context->internal_cb(wrb_context->internal_cb_context,
+						status, wrb_context->wrb);
+	}
+
+	/* callback */
+	if (wrb_context->cb) {
+		wrb_context->cb(wrb_context->cb_context,
+					      status, wrb_context->wrb);
+	}
+	/* Free the context structure */
+	_be_mcc_free_wrb_context(pfob, wrb_context);
+}
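
The tag read above implies the posting path stores the context pointer in the
WRB's two 32-bit tag DWORDs. That code is not in this hunk; one plausible
counterpart, sketched with the AMAP accessors:

	static inline void example_stash_wrb_context(struct MCC_WRB_AMAP *wrb,
					struct be_mcc_wrb_context *context)
	{
		u64 tag = (u64) (size_t) context;

		AMAP_SET_BITS_PTR(MCC_WRB, tag[0], wrb, (u32) tag);
		AMAP_SET_BITS_PTR(MCC_WRB, tag[1], wrb, (u32) (tag >> 32));
	}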
+
+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
+{
+	struct be_function_object *pfob = NULL;
+	int status = BE_PENDING;
+	struct be_generic_q_ctxt *q_ctxt;
+	struct MCC_WRB_AMAP *wrb;
+	struct MCC_WRB_AMAP *queue_wrb;
+	u32 length, payload_length, sge_count, embedded;
+	unsigned long irql;
+
+	BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
+			  sizeof(struct be_queue_driver_context) +
+					sizeof(struct MCC_WRB_AMAP)));
+	pfob = mcc->parent_function;
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	if (mcc->driving_backlog) {
+		spin_unlock_irqrestore(&pfob->post_lock, irql);
+		if (pfob->pend_queue_driving && pfob->mcc) {
+			pfob->pend_queue_driving = 0;
+			be_drive_mcc_wrb_queue(pfob->mcc);
+		}
+		return;
+	}
+	/* Acquire the flag to limit 1 thread to redrive posts. */
+	mcc->driving_backlog = 1;
+
+	while (!list_empty(&mcc->backlog)) {
+		wrb = _be_mpu_peek_ring_wrb(mcc, true);	/* Driving the queue */
+		if (!wrb)
+			break;	/* No space in the ring yet. */
+		/* Get the next queued entry to process. */
+		q_ctxt = list_first_entry(&mcc->backlog,
+				struct be_generic_q_ctxt, context.list);
+		list_del(&q_ctxt->context.list);
+		pfob->mcc->backlog_length--;
+		/*
+		 * Compute the required length of the WRB.
+		 * Since the queue element may be smaller than
+		 * the complete WRB, copy only the required number of bytes.
+		 */
+		queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+		embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
+		if (embedded) {
+			payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
+						payload_length, queue_wrb);
+			length = sizeof(struct be_mcc_wrb_header) +
+								payload_length;
+		} else {
+			sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
+								queue_wrb);
+			ASSERT(sge_count == 1); /* only 1 frag. */
+			length = sizeof(struct be_mcc_wrb_header) +
+			    sge_count * sizeof(struct MCC_SGE_AMAP);
+		}
+
+		/*
+		 * Truncate the length based on the size of the
+		 * queue element.  Some elements that have output parameters
+		 * can be smaller than the payload_length field would
+		 * indicate.  We really only need to copy the request
+		 * parameters, not the response.
+		 */
+		length = min(length, (u32) (q_ctxt->context.bytes -
+			offsetof(struct be_generic_q_ctxt, wrb_header)));
+
+		/* Copy the queue element WRB into the ring. */
+		memcpy(wrb, &q_ctxt->wrb_header, length);
+
+		/* Post the wrb.  This should not fail assuming we have
+		 * enough context structs. */
+		status = be_function_post_mcc_wrb(pfob, wrb, NULL,
+			   q_ctxt->context.cb, q_ctxt->context.cb_context,
+			   q_ctxt->context.internal_cb,
+			   q_ctxt->context.internal_cb_context,
+			   q_ctxt->context.optional_fwcmd_va,
+			   &q_ctxt->context.copy);
+
+		if (status == BE_SUCCESS) {
+			/*
+			 * Synchronous completion. Since it was queued,
+			 * we will invoke the callback.
+			 * To the user, this is an asynchronous request.
+			 */
+			spin_unlock_irqrestore(&pfob->post_lock, irql);
+			if (pfob->pend_queue_driving && pfob->mcc) {
+				pfob->pend_queue_driving = 0;
+				be_drive_mcc_wrb_queue(pfob->mcc);
+			}
+
+			ASSERT(q_ctxt->context.cb);
+
+			q_ctxt->context.cb(
+				q_ctxt->context.cb_context,
+						BE_SUCCESS, NULL);
+
+			spin_lock_irqsave(&pfob->post_lock, irql);
+
+		} else if (status != BE_PENDING) {
+			/*
+			 * Another resource failed.  Should never happen
+			 * if we have sufficient MCC_WRB_CONTEXT structs.
+			 * Return to head of the queue.
+			 */
+			TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
+			      status);
+			list_add(&q_ctxt->context.list, &mcc->backlog);
+			pfob->mcc->backlog_length++;
+			break;
+		}
+	}
+
+	/* Free the flag to limit 1 thread to redrive posts. */
+	mcc->driving_backlog = 0;
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+}
+
+/* This function asserts that the WRB was consumed in order. */
+#ifdef BE_DEBUG
+u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
+					struct MCC_CQ_ENTRY_AMAP *cqe)
+{
+	struct be_mcc_wrb_context *wrb_context = NULL;
+	u32 wrb_index;
+	u32 wrb_consumed_in_order;
+	u32 offset;
+	u8 *p;
+
+	ASSERT(cqe);
+	/*
+	 * A command completed.  Commands complete out-of-order.
+	 * Determine which command completed from the TAG.
+	 */
+	offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
+	p = (u8 *) cqe + offset;
+	wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
+
+	ASSERT(wrb_context);
+
+	wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
+		(u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));
+
+	ASSERT(wrb_index < mcc->sq.ring.num);
+
+	wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
+	mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
+	return wrb_consumed_in_order;
+}
+#endif
+
+int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
+{
+	struct be_function_object *pfob = NULL;
+	struct MCC_CQ_ENTRY_AMAP *cqe;
+	struct CQ_DB_AMAP db;
+	struct mp_ring *cq_ring = &mcc->cq.ring;
+	struct mp_ring *mp_ring = &mcc->sq.ring;
+	u32 num_processed = 0;
+	u32 consumed = 0, valid, completed, cqe_consumed, async_event;
+
+	pfob = mcc->parent_function;
+
+	spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
+
+	/*
+	 * Verify that only one thread is processing the CQ at once.
+	 * We cannot hold the lock while processing the CQ due to
+	 * the callbacks into the OS.  Therefore, this flag is used
+	 * to control it.  If any of the threads want to
+	 * rearm the CQ, we need to honor that.
+	 */
+	if (mcc->processing != 0) {
+		mcc->rearm = mcc->rearm || rearm;
+		goto Error;
+	} else {
+		mcc->processing = 1;	/* lock processing for this thread. */
+		mcc->rearm = rearm;	/* set our rearm setting */
+	}
+
+	spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
+
+	cqe = mp_ring_current(cq_ring);
+	valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
+	while (valid) {
+
+		if (num_processed >= 8) {
+			/* coalesce doorbells, but free space in cq
+			 * ring while processing. */
+			db.dw[0] = 0;	/* clear */
+			AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
+			AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
+			AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
+			AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
+							num_processed);
+			num_processed = 0;
+
+			PD_WRITE(pfob, cq_db, db.dw[0]);
+		}
+
+		async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
+		if (async_event) {
+			/* This is an asynchronous event. */
+			struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
+			    (struct ASYNC_EVENT_TRAILER_AMAP *)
+			    ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
+			     sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
+			u32 event_code;
+			async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+						async_event, async_trailer);
+			ASSERT(async_event == 1);
+
+			valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+						valid, async_trailer);
+			ASSERT(valid == 1);
+
+			/* Call the async event handler if it is installed. */
+			if (mcc->async_cb) {
+				event_code =
+					AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+						event_code, async_trailer);
+				mcc->async_cb(mcc->async_context,
+					    (u32) event_code, (void *) cqe);
+			}
+
+		} else {
+			/* This is a completion entry. */
+
+			/* No vm forwarding in this driver. */
+
+			cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
+						consumed, cqe);
+			if (cqe_consumed) {
+				/*
+				 * A command on the MCC ring was consumed.
+				 * Update the consumer index.
+				 * These occur in order.
+				 */
+				ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
+				consumed++;
+			}
+
+			completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
+					completed, cqe);
+			if (completed) {
+				/* A command completed.  Use tag to
+				 * determine which command.  */
+				be_mcc_process_cqe(pfob, cqe);
+			}
+		}
+
+		/* Reset the CQE */
+		AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
+		num_processed++;
+
+		/* Update our tracking for the CQ ring. */
+		cqe = mp_ring_next(cq_ring);
+		valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
+	}
+
+	TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
+	      num_processed, consumed);
+	/*
+	 * Grab the CQ lock to synchronize the "rearm" setting for
+	 * the doorbell, and for clearing the "processing" flag.
+	 */
+	spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
+
+	/*
+	 * Rearm the cq.  This is done based on the global mcc->rearm
+	 * flag which combines the rearm parameter from the current
+	 * call to process_cq and any other threads
+	 * that tried to process the CQ while this one was active.
+	 * This handles the situation where a sync. fwcmd was processing
+	 * the CQ while the interrupt/dpc tries to process it.
+	 * The sync process gets to continue -- but it is now
+	 * responsible for the rearming.
+	 */
+	if (num_processed > 0 || mcc->rearm) {
+		db.dw[0] = 0;	/* clear */
+		AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
+		AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
+		AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
+		AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);
+
+		PD_WRITE(pfob, cq_db, db.dw[0]);
+	}
+	/*
+	 * Update the consumer index after ringing the CQ doorbell.
+	 * We don't want another thread to post more WRBs before we
+	 * have CQ space available.
+	 */
+	mp_ring_consume_multiple(mp_ring, consumed);
+
+	/* Clear the processing flag. */
+	mcc->processing = 0;
+
+Error:
+	spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
+	/*
+	 * Use the local variable to detect if the current thread
+	 * holds the WRB post lock.  If rearm is false, this is
+	 * either a synchronous command, or the upper layer driver is polling
+	 * from a thread.  We do not drive the queue from that
+	 * context since the driver may hold the
+	 * wrb post lock already.
+	 */
+	if (rearm)
+		be_drive_mcc_wrb_queue(mcc);
+	else
+		pfob->pend_queue_driving = 1;
+
+	return BE_SUCCESS;
+}
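+
+/*
+ * Usage sketch (hypothetical caller): an interrupt/DPC path drains the CQ
+ * with rearm=true so the final doorbell re-enables events, while the
+ * synchronous poll path in _be_mpu_post_wrb_ring() below passes
+ * rearm=false and leaves queue driving to the pend_queue_driving flag.
+ */
+static void example_mcc_interrupt(struct be_mcc_object *mcc)
+{
+	/* Drain completions and rearm the CQ for the next event. */
+	be_mcc_process_cq(mcc, true);
+}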
+
+/*
+ *============================================================================
+ *                  P U B L I C  R O U T I N E S
+ *============================================================================
+ */
+
+/*
+    This routine creates an MCC object.  This object contains an MCC send queue
+    and a CQ private to the MCC.
+
+    pfob            - Handle to a function object
+
+    cq              - CQ object that will be used to dispatch completions
+			for this MCC
+
+    mcc             - Pointer to the internal MCC object to initialize.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+	is returned.
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+int
+be_mcc_ring_create(struct be_function_object *pfob,
+		   struct ring_desc *rd, u32 length,
+		   struct be_mcc_wrb_context *context_array,
+		   u32 num_context_entries,
+		   struct be_cq_object *cq, struct be_mcc_object *mcc)
+{
+	int status = 0;
+
+	struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
+	struct MCC_WRB_AMAP *wrb = NULL;
+	u32 num_entries_encoded, n, i;
+	void *va = NULL;
+	unsigned long irql;
+
+	if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
+		TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
+		return BE_NOT_OK;
+	}
+	/*
+	 * Reduce the actual ring size to be less than the number
+	 * of context entries.  This ensures that we run out of
+	 * ring WRBs first so the queuing works correctly.  We never
+	 * queue based on context structs.
+	 */
+	if (num_context_entries + 1 <
+			length / sizeof(struct MCC_WRB_AMAP) - 1) {
+
+		u32 max_length =
+		    (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);
+
+		if (is_power_of_2(max_length))
+			length = __roundup_pow_of_two(max_length+1) / 2;
+		else
+			length = __roundup_pow_of_two(max_length) / 2;
+
+		ASSERT(length <= max_length);
+
+		TRACE(DL_WARN,
+			"MCC ring length reduced based on context entries."
+			" length:%d wrbs:%d context_entries:%d", length,
+			(int) (length / sizeof(struct MCC_WRB_AMAP)),
+			num_context_entries);
+	}
+
+	spin_lock_irqsave(&pfob->post_lock, irql);
+
+	num_entries_encoded =
+	    be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));
+
+	/* Init MCC object. */
+	memset(mcc, 0, sizeof(*mcc));
+	mcc->parent_function = pfob;
+	mcc->cq_object = cq;
+
+	INIT_LIST_HEAD(&mcc->backlog);
+
+	wrb = be_function_peek_mcc_wrb(pfob);
+	if (!wrb) {
+		ASSERT(wrb);
+		TRACE(DL_ERR, "No free MCC WRBs in MCC ring create.");
+		status = BE_STATUS_NO_MCC_WRB;
+		goto error;
+	}
+	/* Prepares an embedded fwcmd, including request/response sizes. */
+	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
+
+	fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+	/*
+	 * Program MCC ring context
+	 */
+	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
+			&fwcmd->params.request.context, 0);
+	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
+			&fwcmd->params.request.context, false);
+	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
+			&fwcmd->params.request.context, num_entries_encoded);
+
+	n = cq->cq_id;
+	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
+				cq_id, &fwcmd->params.request.context, n);
+	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+				ARRAY_SIZE(fwcmd->params.request.pages));
+	/* Post the f/w command */
+	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+						NULL, NULL, fwcmd, NULL);
+	if (status != BE_SUCCESS) {
+		TRACE(DL_ERR, "FWCMD to create MCC ring failed.");
+		goto error;
+	}
+	/*
+	 * Create a linked list of context structures
+	 */
+	mcc->wrb_context.base = context_array;
+	mcc->wrb_context.num = num_context_entries;
+	INIT_LIST_HEAD(&mcc->wrb_context.list_head);
+	memset(context_array, 0,
+		    sizeof(struct be_mcc_wrb_context) * num_context_entries);
+	for (i = 0; i < mcc->wrb_context.num; i++) {
+		list_add_tail(&context_array[i].next,
+					&mcc->wrb_context.list_head);
+	}
+
+	/*
+	 * Create an mp_ring for tracking the WRB hardware ring.
+	 */
+	va = rd->va;
+	ASSERT(va);
+	mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
+				sizeof(struct MCC_WRB_AMAP), va);
+	mcc->sq.ring.id = fwcmd->params.response.id;
+	/*
+	 * Init an mp_ring for tracking the MCC CQ.
+	 */
+	ASSERT(cq->va);
+	mp_ring_create(&mcc->cq.ring, cq->num_entries,
+		       sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
+	mcc->cq.ring.id = cq->cq_id;
+
+	/* Force zeroing of CQ. */
+	memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));
+
+	/* Initialize debug index. */
+	mcc->consumed_index = 0;
+
+	atomic_inc(&cq->ref_count);
+	pfob->mcc = mcc;
+
+	TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
+	      " num_context:%d", mcc->sq.ring.id, length,
+	      cq->cq_id, cq->num_entries, num_context_entries);
+
+error:
+	spin_unlock_irqrestore(&pfob->post_lock, irql);
+	if (pfob->pend_queue_driving && pfob->mcc) {
+		pfob->pend_queue_driving = 0;
+		be_drive_mcc_wrb_queue(pfob->mcc);
+	}
+	return status;
+}
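+
+/*
+ * Creation sketch (hypothetical caller; sizing illustrative): the caller
+ * owns the ring memory, the WRB context array and the CQ, and hands them
+ * all to be_mcc_ring_create(); on success the function object leaves
+ * mailbox mode (pfob->mcc != NULL).
+ */
+static struct be_mcc_wrb_context example_contexts[32];
+
+static int example_create_mcc(struct be_function_object *pfob,
+			      struct ring_desc *rd, u32 ring_bytes,
+			      struct be_cq_object *cq,
+			      struct be_mcc_object *mcc)
+{
+	return be_mcc_ring_create(pfob, rd, ring_bytes, example_contexts,
+				  ARRAY_SIZE(example_contexts), cq, mcc);
+}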
+
+/*
+    This routine destroys an MCC send queue
+
+    mcc             - Internal MCC object to be destroyed.
+
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL < DISPATCH_LEVEL
+
+    The caller of this routine must ensure that no other WRB may be posted
+    until this routine returns.
+
+*/
+int be_mcc_ring_destroy(struct be_mcc_object *mcc)
+{
+	int status = 0;
+	struct be_function_object *pfob = mcc->parent_function;
+
+	ASSERT(mcc->processing == 0);
+
+	/*
+	 * Remove the ring from the function object.
+	 * This transitions back to mailbox mode.
+	 */
+	pfob->mcc = NULL;
+
+	/* Send fwcmd to destroy the queue.  (Using the mailbox.) */
+	status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
+			     FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
+	ASSERT(status == 0);
+
+	/* Release the SQ reference to the CQ */
+	atomic_dec(&mcc->cq_object->ref_count);
+
+	return status;
+}
+
+static void
+mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
+{
+	struct be_mcc_wrb_context *wrb_context =
+				(struct be_mcc_wrb_context *) context;
+	ASSERT(wrb_context);
+	*wrb_context->users_final_status = status;
+}
+
+/*
+    This routine posts a command to the MCC send queue
+
+    mcc             - Internal MCC object to post the WRB to.
+
+    wrb             - wrb to post.
+
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL
+    IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL
+
+    If this routine is called with CompletionCallback != NULL the
+    call is considered to be asynchronous and will return as soon
+    as the WRB is posted to the MCC with BE_PENDING.
+
+    If CompletionCallback is NULL, then this routine will not return until
+    a completion for this MCC command has been processed.
+    If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
+
+    This routine should only be called if the MPU has been bootstrapped past
+    mailbox mode.
+
+
+*/
+int
+_be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
+				struct be_mcc_wrb_context *wrb_context)
+{
+
+	struct MCC_WRB_AMAP *ring_wrb = NULL;
+	int status = BE_PENDING;
+	int final_status = BE_PENDING;
+	mcc_wrb_cqe_callback cb = NULL;
+	struct MCC_DB_AMAP mcc_db;
+	u32 embedded;
+
+	ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
+	/*
+	 * Input wrb is most likely the next wrb in the ring, since the client
+	 * can peek at the address.
+	 */
+	ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
+	if (wrb != ring_wrb) {
+		/* If not equal, copy it into the ring. */
+		memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
+	}
+#ifdef BE_DEBUG
+	wrb_context->ring_wrb = ring_wrb;
+#endif
+	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
+	if (embedded) {
+		/* embedded commands will have the response within the WRB. */
+		wrb_context->wrb = ring_wrb;
+	} else {
+		/*
+		 * non-embedded commands will not have the response
+		 * within the WRB, and they may complete out-of-order.
+		 * The WRB will not be valid to inspect
+		 * during the completion.
+		 */
+		wrb_context->wrb = NULL;
+	}
+	cb = wrb_context->cb;
+
+	if (cb == NULL) {
+		/* Assign our internal callback if this is a
+		 * synchronous call. */
+		wrb_context->cb = mcc_wrb_sync_cb;
+		wrb_context->cb_context = wrb_context;
+		wrb_context->users_final_status = &final_status;
+	}
+	/* Increment producer index */
+
+	mcc_db.dw[0] = 0;		/* initialize */
+	AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
+	AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);
+
+	mp_ring_produce(&mcc->sq.ring);
+	PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
+	TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
+	      mcc->sq.ring.cidx);
+
+	if (cb == NULL) {
+		int polls = 0;	/* At >= 1 us per poll   */
+		/* Wait until this command completes, polling the CQ. */
+		do {
+			TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
+			/* Do not rearm CQ in this context. */
+			be_mcc_process_cq(mcc, false);
+
+			if (final_status == BE_PENDING) {
+				if ((++polls & 0x7FFFF) == 0) {
+					TRACE(DL_WARN,
+					      "Warning: polling MCC CQ for %d"
+					      " ms.", polls / 1000);
+				}
+
+				udelay(1);
+			}
+
+			/* final_status changed when the command completes */
+		} while (final_status == BE_PENDING);
+
+		status = final_status;
+	}
+
+	return status;
+}
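+
+/*
+ * Posting sketch (hypothetical helper): sync vs. async is selected purely
+ * through wrb_context->cb.  With a callback installed the post returns
+ * BE_PENDING immediately and completion arrives via be_mcc_process_cqe();
+ * with cb == NULL the routine above polls until mcc_wrb_sync_cb() fires.
+ */
+static int example_post_async(struct be_mcc_object *mcc,
+			      struct MCC_WRB_AMAP *wrb,
+			      struct be_mcc_wrb_context *ctxt,
+			      mcc_wrb_cqe_callback done_cb, void *cb_arg)
+{
+	ctxt->cb = done_cb;
+	ctxt->cb_context = cb_arg;
+	return _be_mpu_post_wrb_ring(mcc, wrb, ctxt);	/* BE_PENDING */
+}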
+
+struct MCC_WRB_AMAP *
+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
+{
+	/* If we have queued items, do not allow a post to bypass the queue. */
+	if (!driving_queue && !list_empty(&mcc->backlog))
+		return NULL;
+
+	if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
+		return NULL;
+	return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
+}
+
+int
+be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
+{
+	ASSERT(mailbox);
+	pfob->mailbox.va = mailbox->va;
+	pfob->mailbox.pa =  cpu_to_le64(mailbox->pa);
+	pfob->mailbox.length = mailbox->length;
+
+	ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
+	ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
+	/*
+	 * Issue the WRB to set MPU endianness
+	 */
+	{
+		u64 *endian_check = (u64 *) (pfob->mailbox.va +
+				offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8);
+		*endian_check = 0xFF1234FFFF5678FFULL;
+	}
+
+	be_mcc_mailbox_notify_and_wait(pfob);
+
+	return BE_SUCCESS;
+}
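+
+/*
+ * The 64-bit signature written above doubles as a byte-order probe:
+ * presumably the MPU locates the 0x1234/0x5678 markers between the 0xFF
+ * fence bytes to learn the host's endianness before parsing any
+ * structured WRB.  On a little-endian host the bytes land in memory as:
+ *
+ *	ff 78 56 ff ff 34 12 ff
+ */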
+
+
+/*
+    This routine posts a command to the MCC mailbox.
+
+    pfob            - Function object to post the WRB on behalf of.
+    wrb             - wrb to post.
+    wrb_context     - Context for this WRB; its completion callback (if any)
+				is invoked once the WRB is completed.
+
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL
+
+    This routine will block until a completion for this MCC command has been
+    processed. If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
+
+    This routine should only be called if the MPU has not been bootstrapped
+    past mailbox mode.
+*/
+int
+_be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
+	 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
+{
+	struct MCC_MAILBOX_AMAP *mailbox = NULL;
+	struct MCC_WRB_AMAP *mb_wrb;
+	struct MCC_CQ_ENTRY_AMAP *mb_cq;
+	u32 offset, status;
+
+	ASSERT(pfob->mcc == NULL);
+	mailbox = pfob->mailbox.va;
+	ASSERT(mailbox);
+
+	offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
+	mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset);
+	if (mb_wrb != wrb) {
+		memset(mailbox, 0, sizeof(*mailbox));
+		memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
+	}
+	/* The callback can inspect the final WRB to get output parameters. */
+	wrb_context->wrb = mb_wrb;
+
+	be_mcc_mailbox_notify_and_wait(pfob);
+
+	/* A command completed.  Use tag to determine which command. */
+	offset = offsetof(struct BE_MCC_MAILBOX_AMAP, cq)/8;
+	mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
+	be_mcc_process_cqe(pfob, mb_cq);
+
+	status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);
+	if (status)
+		status = BE_NOT_OK;
+	return status;
+}
+
+struct be_mcc_wrb_context *
+_be_mcc_allocate_wrb_context(struct be_function_object *pfob)
+{
+	struct be_mcc_wrb_context *context = NULL;
+	unsigned long irq;
+
+	spin_lock_irqsave(&pfob->mcc_context_lock, irq);
+
+	if (!pfob->mailbox.default_context_allocated) {
+		/* Use the single default context that we
+		 * always have allocated. */
+		pfob->mailbox.default_context_allocated = true;
+		context = &pfob->mailbox.default_context;
+	} else if (pfob->mcc) {
+		/* Get a context from the free list. If any are available. */
+		if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
+			context = list_first_entry(
+				&pfob->mcc->wrb_context.list_head,
+					 struct be_mcc_wrb_context, next);
+		}
+	}
+
+	spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
+
+	return context;
+}
+
+void
+_be_mcc_free_wrb_context(struct be_function_object *pfob,
+			 struct be_mcc_wrb_context *context)
+{
+	unsigned long irq;
+
+	ASSERT(context);
+	/*
+	 * Zero during free to try and catch any bugs where the context
+	 * is accessed after a free.
+	 */
+	memset(context, 0, sizeof(*context));
+
+	spin_lock_irqsave(&pfob->mcc_context_lock, irq);
+
+	if (context == &pfob->mailbox.default_context) {
+		/* Free the default context. */
+		ASSERT(pfob->mailbox.default_context_allocated);
+		pfob->mailbox.default_context_allocated = false;
+	} else {
+		/* Add to free list. */
+		ASSERT(pfob->mcc);
+		list_add_tail(&context->next,
+				&pfob->mcc->wrb_context.list_head);
+	}
+
+	spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
+}
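+
+/*
+ * Context lifetime sketch (hypothetical caller; error handling elided):
+ * every posted WRB borrows one wrb_context and must return it exactly
+ * once, typically from its completion callback.
+ */
+static int example_with_context(struct be_function_object *pfob)
+{
+	struct be_mcc_wrb_context *ctxt;
+
+	ctxt = _be_mcc_allocate_wrb_context(pfob);
+	if (!ctxt)
+		return BE_STATUS_NO_MCC_WRB;
+
+	/* ... fill in ctxt->cb etc. and post the WRB ... */
+
+	_be_mcc_free_wrb_context(pfob, ctxt);	/* normally on completion */
+	return BE_SUCCESS;
+}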
+
+int
+be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
+		mcc_async_event_callback cb, void *cb_context)
+{
+	/* Lock against anyone trying to change the callback/context pointers
+	 * while being used. */
+	spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
+		mcc_object->parent_function->cq_irq);
+
+	/* Assign the async callback. */
+	mcc_object->async_context = cb_context;
+	mcc_object->async_cb = cb;
+
+	spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
+					mcc_object->parent_function->cq_irq);
+
+	return BE_SUCCESS;
+}
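+
+/*
+ * Async-event sketch: one handler receives every asynchronous CQ entry;
+ * event_code selects the event type and the raw CQE is passed through
+ * for decoding.  The handler below is illustrative only.
+ */
+static void example_async_handler(void *ctx, u32 event_code, void *cqe)
+{
+	TRACE(DL_INFO, "async event 0x%x", event_code);
+}
+/* Registered via:
+ *	be_mcc_add_async_event_callback(mcc, example_async_handler, NULL);
+ */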
+
+#define MPU_EP_CONTROL 0
+#define MPU_EP_SEMAPHORE 0xac
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_wait_for_POST_complete
+ *   Waits until the BladeEngine POST completes (either in error or success).
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+static int be_wait_for_POST_complete(struct be_function_object *pfob)
+{
+	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+	int s;
+	u32 post_error, post_stage;
+
+	const u32 us_per_loop = 1000;	/* 1000us */
+	const u32 print_frequency_loops = 1000000 / us_per_loop;
+	const u32 max_loops = 60 * print_frequency_loops;
+	u32 loops = 0;
+
+	/*
+	 * Wait for the ARM firmware to indicate it is done, or for a fatal
+	 * error.  Note: POST can take some time to complete depending on
+	 * configuration settings (consider that the ARM may attempt to
+	 * acquire an IP address over DHCP).
+	 */
+	do {
+		status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+		post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+						error, &status);
+		post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+						stage, &status);
+		if (0 == (loops % print_frequency_loops)) {
+			/* Print current status */
+			TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
+				status.dw[0], post_stage);
+		}
+		udelay(us_per_loop);
+	} while ((post_error != 1) &&
+		 (post_stage != POST_STAGE_ARMFW_READY) &&
+		 (++loops < max_loops));
+
+	if (post_error == 1) {
+		TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
+		      status.dw[0], post_stage);
+		s = BE_NOT_OK;
+	} else if (post_stage != POST_STAGE_ARMFW_READY) {
+		TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
+		      status.dw[0], post_stage);
+		s = BE_NOT_OK;
+	} else {
+		s = BE_SUCCESS;
+	}
+	return s;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_kickoff_and_wait_for_POST
+ *   Interacts with the BladeEngine management processor to initiate POST, and
+ *   subsequently waits until POST completes (either in error or success).
+ *   The caller must acquire the reset semaphore before initiating POST
+ *   to prevent multiple drivers interacting with the management processor.
+ *   Once POST is complete the caller must release the reset semaphore.
+ *   Callers who only want to wait for POST complete may call
+ *   be_wait_for_POST_complete.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+static int
+be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
+{
+	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+	int s;
+
+	const u32 us_per_loop = 1000;	/* 1000us */
+	const u32 print_frequency_loops = 1000000 / us_per_loop;
+	const u32 max_loops = 5 * print_frequency_loops;
+	u32 loops = 0;
+	u32 post_error, post_stage;
+
+	/* Wait until the ARM fw awaits host-ready, or a fatal error occurs. */
+	TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
+	do {
+		status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+		post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+						error, &status);
+		post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+						stage, &status);
+		if (0 == (loops % print_frequency_loops)) {
+			/* Print current status */
+			TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
+			      status.dw[0], post_stage);
+		}
+		udelay(us_per_loop);
+	} while ((post_error != 1) &&
+		 (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
+		 (++loops < max_loops));
+
+	if (post_error == 1) {
+		TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
+		      status.dw[0], post_stage);
+		s = BE_NOT_OK;
+	} else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
+		iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);
+
+		/* Wait for POST to complete */
+		s = be_wait_for_POST_complete(pfob);
+	} else {
+		/*
+		 * Either a timeout waiting for host ready signal or POST has
+		 * moved ahead without requiring a host ready signal.
+		 * Might as well give POST a chance to complete
+		 * (or timeout again).
+		 */
+		s = be_wait_for_POST_complete(pfob);
+	}
+	return s;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_pci_soft_reset
+ *   This function is called to issue a BladeEngine soft reset.
+ *   Callers should acquire the soft reset semaphore before calling this
+ *   function. Additionally, callers should ensure they cannot be pre-empted
+ *   while the routine executes. Upon completion of this routine, callers
+ *   should release the reset semaphore. This routine implicitly waits
+ *   for BladeEngine POST to complete.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+int be_pci_soft_reset(struct be_function_object *pfob)
+{
+	struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+	struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
+	struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
+	struct EP_CONTROL_CSR_AMAP epControlCsr;
+	int status = BE_SUCCESS;
+	u32 i, soft_reset_bit;
+
+	TRACE(DL_NOTE, "PCI reset...");
+
+	/* Issue soft reset #1 to get BladeEngine into a known state. */
+	soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+	AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
+	PCICFG0_WRITE(pfob, soft_reset, soft_reset.dw[0]);
+	/*
+	 * wait until soft reset is deasserted - hardware
+	 * deasserts after some time.
+	 */
+	i = 0;
+	do {
+		udelay(50);
+		soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+		soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
+					softreset, soft_reset.dw);
+	} while (soft_reset_bit  && (i++ < 1024));
+	if (soft_reset_bit != 0) {
+		TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
+		status = BE_NOT_OK;
+		goto Error_label;
+	}
+	/* Mask everything  */
+	PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
+	PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
+	/*
+	 * Set everything offline except MPU IRAM (it is offline with
+	 * the soft-reset, but soft-reset does not reset the PCICFG registers!)
+	 */
+	pciOnline0.dw[0] = 0;
+	pciOnline1.dw[0] = 0;
+	AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
+				pciOnline1.dw, 1);
+	PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]);
+	PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]);
+
+	udelay(20000);
+
+	/* Issue soft reset #2. */
+	AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
+	PCICFG0_WRITE(pfob, soft_reset, soft_reset.dw[0]);
+	/*
+	 * wait until soft reset is deasserted - hardware
+	 * deasserts after some time.
+	 */
+	i = 0;
+	do {
+		udelay(50);
+		soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+		soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
+					softreset, soft_reset.dw);
+	} while (soft_reset_bit  && (i++ < 1024));
+	if (soft_reset_bit != 0) {
+		TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected.");
+		status = BE_NOT_OK;
+		goto Error_label;
+	}
+
+	udelay(20000);
+
+	/* Take MPU out of reset. */
+
+	epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
+	AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0);
+	iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL);
+
+	/* Kickoff BE POST and wait for completion */
+	status = be_kickoff_and_wait_for_POST(pfob);
+
+Error_label:
+	return status;
+}
+
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_pci_reset_required
+ *   This private function is called to detect if a host entity is
+ *   required to issue a PCI soft reset and subsequently drive
+ *   BladeEngine POST. Scenarios where this is required:
+ *   1) BIOS-less configuration
+ *   2) Hot-swap/plug/power-on
+ * pfob -
+ * return   true if a reset is required, false otherwise
+ *-------------------------------------------------------------------
+ */
+static bool be_pci_reset_required(struct be_function_object *pfob)
+{
+	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+	bool do_reset = false;
+	u32 post_error, post_stage;
+
+	/*
+	 * Read the POST status register
+	 */
+	status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+	post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
+								&status);
+	post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
+								&status);
+	if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
+		/*
+		 * If BladeEngine is waiting for host ready indication,
+		 * we want to do a PCI reset.
+		 */
+		do_reset = true;
+	}
+
+	return do_reset;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_drive_POST
+ *   This function is called to drive BladeEngine POST. The
+ *   caller should ensure they cannot be pre-empted while this routine executes.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+int be_drive_POST(struct be_function_object *pfob)
+{
+	int status;
+
+	if (be_pci_reset_required(pfob)) {
+		/* PCI reset is needed (implicitly starts and waits for POST) */
+		status = be_pci_soft_reset(pfob);
+	} else {
+		/* No PCI reset is needed, start POST */
+		status = be_kickoff_and_wait_for_POST(pfob);
+	}
+
+	return status;
+}
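+
+/*
+ * Bring-up ordering sketch (hypothetical probe path): drive POST first,
+ * then initialize the bootstrap mailbox; only later, once EQ/CQ resources
+ * exist, does be_mcc_ring_create() take the function object out of
+ * mailbox mode.
+ */
+static int example_bringup(struct be_function_object *pfob,
+			   struct ring_desc *mailbox_rd)
+{
+	int status = be_drive_POST(pfob);
+
+	if (status != BE_SUCCESS)
+		return status;
+	return be_mpu_init_mailbox(pfob, mailbox_rd);
+}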

+ 74 - 0
drivers/staging/benet/mpu.h

@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __mpu_amap_h__
+#define __mpu_amap_h__
+#include "ep.h"
+
+/* Provide control parameters for the Management Processor Unit. */
+struct BE_MPU_CSRMAP_AMAP {
+	struct BE_EP_CSRMAP_AMAP ep;
+	u8 rsvd0[128];	/* DWORD 64 */
+	u8 rsvd1[32];	/* DWORD 68 */
+	u8 rsvd2[192];	/* DWORD 69 */
+	u8 rsvd3[192];	/* DWORD 75 */
+	u8 rsvd4[32];	/* DWORD 81 */
+	u8 rsvd5[32];	/* DWORD 82 */
+	u8 rsvd6[32];	/* DWORD 83 */
+	u8 rsvd7[32];	/* DWORD 84 */
+	u8 rsvd8[32];	/* DWORD 85 */
+	u8 rsvd9[32];	/* DWORD 86 */
+	u8 rsvd10[32];	/* DWORD 87 */
+	u8 rsvd11[32];	/* DWORD 88 */
+	u8 rsvd12[32];	/* DWORD 89 */
+	u8 rsvd13[32];	/* DWORD 90 */
+	u8 rsvd14[32];	/* DWORD 91 */
+	u8 rsvd15[32];	/* DWORD 92 */
+	u8 rsvd16[32];	/* DWORD 93 */
+	u8 rsvd17[32];	/* DWORD 94 */
+	u8 rsvd18[32];	/* DWORD 95 */
+	u8 rsvd19[32];	/* DWORD 96 */
+	u8 rsvd20[32];	/* DWORD 97 */
+	u8 rsvd21[32];	/* DWORD 98 */
+	u8 rsvd22[32];	/* DWORD 99 */
+	u8 rsvd23[32];	/* DWORD 100 */
+	u8 rsvd24[32];	/* DWORD 101 */
+	u8 rsvd25[32];	/* DWORD 102 */
+	u8 rsvd26[32];	/* DWORD 103 */
+	u8 rsvd27[32];	/* DWORD 104 */
+	u8 rsvd28[96];	/* DWORD 105 */
+	u8 rsvd29[32];	/* DWORD 108 */
+	u8 rsvd30[32];	/* DWORD 109 */
+	u8 rsvd31[32];	/* DWORD 110 */
+	u8 rsvd32[32];	/* DWORD 111 */
+	u8 rsvd33[32];	/* DWORD 112 */
+	u8 rsvd34[96];	/* DWORD 113 */
+	u8 rsvd35[32];	/* DWORD 116 */
+	u8 rsvd36[32];	/* DWORD 117 */
+	u8 rsvd37[32];	/* DWORD 118 */
+	u8 rsvd38[32];	/* DWORD 119 */
+	u8 rsvd39[32];	/* DWORD 120 */
+	u8 rsvd40[32];	/* DWORD 121 */
+	u8 rsvd41[134][32];	/* DWORD 122 */
+} __packed;
+struct MPU_CSRMAP_AMAP {
+	u32 dw[256];
+};
+
+#endif /* __mpu_amap_h__ */

+ 46 - 0
drivers/staging/benet/mpu_context.h

@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __mpu_context_amap_h__
+#define __mpu_context_amap_h__
+
+/*
+ * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
+ * controls the writeback behavior of the producer and consumer index values.
+ */
+struct BE_MCC_RING_CONTEXT_AMAP {
+	u8 con_index[16];	/* DWORD 0 */
+	u8 ring_size[4];	/* DWORD 0 */
+	u8 cq_id[11];	/* DWORD 0 */
+	u8 rsvd0;		/* DWORD 0 */
+	u8 prod_index[16];	/* DWORD 1 */
+	u8 pdid[15];	/* DWORD 1 */
+	u8 invalid;		/* DWORD 1 */
+	u8 cmd_pending_current[7];	/* DWORD 2 */
+	u8 rsvd1[25];	/* DWORD 2 */
+	u8 hpi_port_cq_id[11];	/* DWORD 3 */
+	u8 rsvd2[5];	/* DWORD 3 */
+	u8 cmd_pending_max[7];	/* DWORD 3 */
+	u8 rsvd3[9];	/* DWORD 3 */
+} __packed;
+struct MCC_RING_CONTEXT_AMAP {
+	u32 dw[4];
+};
+
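+/*
+ * Note on the AMAP convention used throughout these autogenerated
+ * headers: each u8 array length is the field width in *bits* (the widths
+ * within a DWORD sum to 32), and the companion struct holding dw[] is the
+ * actual storage.  Fields are accessed through the AMAP accessors, e.g.
+ * (illustrative):
+ *
+ *	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size, &ctx, encoding);
+ *	cq_id = AMAP_GET_BITS_PTR(MCC_RING_CONTEXT, cq_id, &ctx);
+ */
+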
+#endif /* __mpu_context_amap_h__ */

+ 825 - 0
drivers/staging/benet/pcicfg.h

@@ -0,0 +1,825 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __pcicfg_amap_h__
+#define __pcicfg_amap_h__
+
+/* Vendor and Device ID Register. */
+struct BE_PCICFG_ID_CSR_AMAP {
+	u8 vendorid[16];	/* DWORD 0 */
+	u8 deviceid[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ID_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* IO Bar Register. */
+struct BE_PCICFG_IOBAR_CSR_AMAP {
+	u8 iospace;		/* DWORD 0 */
+	u8 rsvd0[7];	/* DWORD 0 */
+	u8 iobar[24];	/* DWORD 0 */
+} __packed;
+struct PCICFG_IOBAR_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Memory BAR 0 Register. */
+struct BE_PCICFG_MEMBAR0_CSR_AMAP {
+	u8 memspace;	/* DWORD 0 */
+	u8 type[2];		/* DWORD 0 */
+	u8 pf;		/* DWORD 0 */
+	u8 rsvd0[10];	/* DWORD 0 */
+	u8 membar0[18];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR0_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Memory BAR 1 - Low Address Register. */
+struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
+	u8 memspace;	/* DWORD 0 */
+	u8 type[2];		/* DWORD 0 */
+	u8 pf;		/* DWORD 0 */
+	u8 rsvd0[13];	/* DWORD 0 */
+	u8 membar1lo[15];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR1_LO_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Memory BAR 1 - High Address Register. */
+struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
+	u8 membar1hi[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR1_HI_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Memory BAR 2 - Low Address Register. */
+struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
+	u8 memspace;	/* DWORD 0 */
+	u8 type[2];		/* DWORD 0 */
+	u8 pf;		/* DWORD 0 */
+	u8 rsvd0[17];	/* DWORD 0 */
+	u8 membar2lo[11];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR2_LO_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Memory BAR 2 - High Address Register. */
+struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
+	u8 membar2hi[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR2_HI_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Subsystem Vendor and ID (Function 0) Register. */
+struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
+	u8 subsys_vendor_id[16];	/* DWORD 0 */
+	u8 subsys_id[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Subsystem Vendor and ID (Function 1) Register. */
+struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
+	u8 subsys_vendor_id[16];	/* DWORD 0 */
+	u8 subsys_id[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Semaphore Register. */
+struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
+	u8 locked;		/* DWORD 0 */
+	u8 rsvd0[31];	/* DWORD 0 */
+} __packed;
+struct PCICFG_SEMAPHORE_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Soft Reset Register. */
+struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
+	u8 rsvd0[7];	/* DWORD 0 */
+	u8 softreset;	/* DWORD 0 */
+	u8 rsvd1[16];	/* DWORD 0 */
+	u8 nec_ll_rcvdetect_i[8];	/* DWORD 0 */
+} __packed;
+struct PCICFG_SOFT_RESET_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
+ * an internal Unrecoverable Error.  These are set by hardware and may be
+ * cleared by writing a one to the respective bit(s) to be cleared.  Any
+ * bit being set that is also unmasked will result in Unrecoverable Error
+ * interrupt notification to the host CPU and/or Server Management chip
+ * and the transitioning of BladeEngine to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
+	u8 cev_ue_status;	/* DWORD 0 */
+	u8 ctx_ue_status;	/* DWORD 0 */
+	u8 dbuf_ue_status;	/* DWORD 0 */
+	u8 erx_ue_status;	/* DWORD 0 */
+	u8 host_ue_status;	/* DWORD 0 */
+	u8 mpu_ue_status;	/* DWORD 0 */
+	u8 ndma_ue_status;	/* DWORD 0 */
+	u8 ptc_ue_status;	/* DWORD 0 */
+	u8 rdma_ue_status;	/* DWORD 0 */
+	u8 rxf_ue_status;	/* DWORD 0 */
+	u8 rxips_ue_status;	/* DWORD 0 */
+	u8 rxulp0_ue_status;	/* DWORD 0 */
+	u8 rxulp1_ue_status;	/* DWORD 0 */
+	u8 rxulp2_ue_status;	/* DWORD 0 */
+	u8 tim_ue_status;	/* DWORD 0 */
+	u8 tpost_ue_status;	/* DWORD 0 */
+	u8 tpre_ue_status;	/* DWORD 0 */
+	u8 txips_ue_status;	/* DWORD 0 */
+	u8 txulp0_ue_status;	/* DWORD 0 */
+	u8 txulp1_ue_status;	/* DWORD 0 */
+	u8 uc_ue_status;	/* DWORD 0 */
+	u8 wdma_ue_status;	/* DWORD 0 */
+	u8 txulp2_ue_status;	/* DWORD 0 */
+	u8 host1_ue_status;	/* DWORD 0 */
+	u8 p0_ob_link_ue_status;	/* DWORD 0 */
+	u8 p1_ob_link_ue_status;	/* DWORD 0 */
+	u8 host_gpio_ue_status;	/* DWORD 0 */
+	u8 mbox_netw_ue_status;	/* DWORD 0 */
+	u8 mbox_stor_ue_status;	/* DWORD 0 */
+	u8 axgmac0_ue_status;	/* DWORD 0 */
+	u8 axgmac1_ue_status;	/* DWORD 0 */
+	u8 mpu_intpend_ue_status;	/* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Unrecoverable Error Status (High) Register. Each bit corresponds to
+ * an internal Unrecoverable Error.  These are set by hardware and may be
+ * cleared by writing a one to the respective bit(s) to be cleared.  Any
+ * bit being set that is also unmasked will result in Unrecoverable Error
+ * interrupt notification to the host CPU and/or Server Management chip;
+ * and the transitioning of BladeEngine to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
+	u8 jtag_ue_status;	/* DWORD 0 */
+	u8 lpcmemhost_ue_status;	/* DWORD 0 */
+	u8 mgmt_mac_ue_status;	/* DWORD 0 */
+	u8 mpu_iram_ue_status;	/* DWORD 0 */
+	u8 pcs0online_ue_status;	/* DWORD 0 */
+	u8 pcs1online_ue_status;	/* DWORD 0 */
+	u8 pctl0_ue_status;	/* DWORD 0 */
+	u8 pctl1_ue_status;	/* DWORD 0 */
+	u8 pmem_ue_status;	/* DWORD 0 */
+	u8 rr_ue_status;	/* DWORD 0 */
+	u8 rxpp_ue_status;	/* DWORD 0 */
+	u8 txpb_ue_status;	/* DWORD 0 */
+	u8 txp_ue_status;	/* DWORD 0 */
+	u8 xaui_ue_status;	/* DWORD 0 */
+	u8 arm_ue_status;	/* DWORD 0 */
+	u8 ipc_ue_status;	/* DWORD 0 */
+	u8 rsvd0[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_HI_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
+ * will mask the associated Unrecoverable Error status bit from notification
+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
+ * transitioning of all BladeEngine units to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
+	u8 cev_ue_mask;	/* DWORD 0 */
+	u8 ctx_ue_mask;	/* DWORD 0 */
+	u8 dbuf_ue_mask;	/* DWORD 0 */
+	u8 erx_ue_mask;	/* DWORD 0 */
+	u8 host_ue_mask;	/* DWORD 0 */
+	u8 mpu_ue_mask;	/* DWORD 0 */
+	u8 ndma_ue_mask;	/* DWORD 0 */
+	u8 ptc_ue_mask;	/* DWORD 0 */
+	u8 rdma_ue_mask;	/* DWORD 0 */
+	u8 rxf_ue_mask;	/* DWORD 0 */
+	u8 rxips_ue_mask;	/* DWORD 0 */
+	u8 rxulp0_ue_mask;	/* DWORD 0 */
+	u8 rxulp1_ue_mask;	/* DWORD 0 */
+	u8 rxulp2_ue_mask;	/* DWORD 0 */
+	u8 tim_ue_mask;	/* DWORD 0 */
+	u8 tpost_ue_mask;	/* DWORD 0 */
+	u8 tpre_ue_mask;	/* DWORD 0 */
+	u8 txips_ue_mask;	/* DWORD 0 */
+	u8 txulp0_ue_mask;	/* DWORD 0 */
+	u8 txulp1_ue_mask;	/* DWORD 0 */
+	u8 uc_ue_mask;	/* DWORD 0 */
+	u8 wdma_ue_mask;	/* DWORD 0 */
+	u8 txulp2_ue_mask;	/* DWORD 0 */
+	u8 host1_ue_mask;	/* DWORD 0 */
+	u8 p0_ob_link_ue_mask;	/* DWORD 0 */
+	u8 p1_ob_link_ue_mask;	/* DWORD 0 */
+	u8 host_gpio_ue_mask;	/* DWORD 0 */
+	u8 mbox_netw_ue_mask;	/* DWORD 0 */
+	u8 mbox_stor_ue_mask;	/* DWORD 0 */
+	u8 axgmac0_ue_mask;	/* DWORD 0 */
+	u8 axgmac1_ue_mask;	/* DWORD 0 */
+	u8 mpu_intpend_ue_mask;	/* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
+ * will mask the associated Unrecoverable Error status bit from notification
+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
+ * transitioning of all BladeEngine units to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
+	u8 jtag_ue_mask;	/* DWORD 0 */
+	u8 lpcmemhost_ue_mask;	/* DWORD 0 */
+	u8 mgmt_mac_ue_mask;	/* DWORD 0 */
+	u8 mpu_iram_ue_mask;	/* DWORD 0 */
+	u8 pcs0online_ue_mask;	/* DWORD 0 */
+	u8 pcs1online_ue_mask;	/* DWORD 0 */
+	u8 pctl0_ue_mask;	/* DWORD 0 */
+	u8 pctl1_ue_mask;	/* DWORD 0 */
+	u8 pmem_ue_mask;	/* DWORD 0 */
+	u8 rr_ue_mask;	/* DWORD 0 */
+	u8 rxpp_ue_mask;	/* DWORD 0 */
+	u8 txpb_ue_mask;	/* DWORD 0 */
+	u8 txp_ue_mask;	/* DWORD 0 */
+	u8 xaui_ue_mask;	/* DWORD 0 */
+	u8 arm_ue_mask;	/* DWORD 0 */
+	u8 ipc_ue_mask;	/* DWORD 0 */
+	u8 rsvd0[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Online Control Register 0. This register controls various units within
+ * BladeEngine being in an Online or Offline state.
+ */
+struct BE_PCICFG_ONLINE0_CSR_AMAP {
+	u8 cev_online;	/* DWORD 0 */
+	u8 ctx_online;	/* DWORD 0 */
+	u8 dbuf_online;	/* DWORD 0 */
+	u8 erx_online;	/* DWORD 0 */
+	u8 host_online;	/* DWORD 0 */
+	u8 mpu_online;	/* DWORD 0 */
+	u8 ndma_online;	/* DWORD 0 */
+	u8 ptc_online;	/* DWORD 0 */
+	u8 rdma_online;	/* DWORD 0 */
+	u8 rxf_online;	/* DWORD 0 */
+	u8 rxips_online;	/* DWORD 0 */
+	u8 rxulp0_online;	/* DWORD 0 */
+	u8 rxulp1_online;	/* DWORD 0 */
+	u8 rxulp2_online;	/* DWORD 0 */
+	u8 tim_online;	/* DWORD 0 */
+	u8 tpost_online;	/* DWORD 0 */
+	u8 tpre_online;	/* DWORD 0 */
+	u8 txips_online;	/* DWORD 0 */
+	u8 txulp0_online;	/* DWORD 0 */
+	u8 txulp1_online;	/* DWORD 0 */
+	u8 uc_online;	/* DWORD 0 */
+	u8 wdma_online;	/* DWORD 0 */
+	u8 txulp2_online;	/* DWORD 0 */
+	u8 host1_online;	/* DWORD 0 */
+	u8 p0_ob_link_online;	/* DWORD 0 */
+	u8 p1_ob_link_online;	/* DWORD 0 */
+	u8 host_gpio_online;	/* DWORD 0 */
+	u8 mbox_netw_online;	/* DWORD 0 */
+	u8 mbox_stor_online;	/* DWORD 0 */
+	u8 axgmac0_online;	/* DWORD 0 */
+	u8 axgmac1_online;	/* DWORD 0 */
+	u8 mpu_intpend_online;	/* DWORD 0 */
+} __packed;
+struct PCICFG_ONLINE0_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Online Control Register 1. This register controls various units within
+ * BladeEngine being in an Online or Offline state.
+ */
+struct BE_PCICFG_ONLINE1_CSR_AMAP {
+	u8 jtag_online;	/* DWORD 0 */
+	u8 lpcmemhost_online;	/* DWORD 0 */
+	u8 mgmt_mac_online;	/* DWORD 0 */
+	u8 mpu_iram_online;	/* DWORD 0 */
+	u8 pcs0online_online;	/* DWORD 0 */
+	u8 pcs1online_online;	/* DWORD 0 */
+	u8 pctl0_online;	/* DWORD 0 */
+	u8 pctl1_online;	/* DWORD 0 */
+	u8 pmem_online;	/* DWORD 0 */
+	u8 rr_online;	/* DWORD 0 */
+	u8 rxpp_online;	/* DWORD 0 */
+	u8 txpb_online;	/* DWORD 0 */
+	u8 txp_online;	/* DWORD 0 */
+	u8 xaui_online;	/* DWORD 0 */
+	u8 arm_online;	/* DWORD 0 */
+	u8 ipc_online;	/* DWORD 0 */
+	u8 rsvd0[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ONLINE1_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Host Timer Register. */
+struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
+	u8 hosttimer[24];	/* DWORD 0 */
+	u8 hostintr;	/* DWORD 0 */
+	u8 rsvd0[7];	/* DWORD 0 */
+} __packed;
+struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* Scratchpad Register (for software use). */
+struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
+	u8 scratchpad[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_SCRATCHPAD_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express Capabilities Register. */
+struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
+	u8 capid[8];	/* DWORD 0 */
+	u8 nextcap[8];	/* DWORD 0 */
+	u8 capver[4];	/* DWORD 0 */
+	u8 devport[4];	/* DWORD 0 */
+	u8 rsvd0[6];	/* DWORD 0 */
+	u8 rsvd1[2];	/* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_CAP_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express Device Capabilities Register. */
+struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
+	u8 payload[3];	/* DWORD 0 */
+	u8 rsvd0[3];	/* DWORD 0 */
+	u8 lo_lat[3];	/* DWORD 0 */
+	u8 l1_lat[3];	/* DWORD 0 */
+	u8 rsvd1[3];	/* DWORD 0 */
+	u8 rsvd2[3];	/* DWORD 0 */
+	u8 pwr_value[8];	/* DWORD 0 */
+	u8 pwr_scale[2];	/* DWORD 0 */
+	u8 rsvd3[4];	/* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express Device Control/Status Registers. */
+struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
+	u8 CorrErrReportEn;	/* DWORD 0 */
+	u8 NonFatalErrReportEn;	/* DWORD 0 */
+	u8 FatalErrReportEn;	/* DWORD 0 */
+	u8 UnsuppReqReportEn;	/* DWORD 0 */
+	u8 EnableRelaxOrder;	/* DWORD 0 */
+	u8 Max_Payload_Size[3];	/* DWORD 0 */
+	u8 ExtendTagFieldEnable;	/* DWORD 0 */
+	u8 PhantomFnEnable;	/* DWORD 0 */
+	u8 AuxPwrPMEnable;	/* DWORD 0 */
+	u8 EnableNoSnoop;	/* DWORD 0 */
+	u8 Max_Read_Req_Size[3];	/* DWORD 0 */
+	u8 rsvd0;		/* DWORD 0 */
+	u8 CorrErrDetect;	/* DWORD 0 */
+	u8 NonFatalErrDetect;	/* DWORD 0 */
+	u8 FatalErrDetect;	/* DWORD 0 */
+	u8 UnsuppReqDetect;	/* DWORD 0 */
+	u8 AuxPwrDetect;	/* DWORD 0 */
+	u8 TransPending;	/* DWORD 0 */
+	u8 rsvd1[10];	/* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express Link Capabilities Register. */
+struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
+	u8 MaxLinkSpeed[4];	/* DWORD 0 */
+	u8 MaxLinkWidth[6];	/* DWORD 0 */
+	u8 ASPMSupport[2];	/* DWORD 0 */
+	u8 L0sExitLat[3];	/* DWORD 0 */
+	u8 L1ExitLat[3];	/* DWORD 0 */
+	u8 rsvd0[6];	/* DWORD 0 */
+	u8 PortNum[8];	/* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express Link Status Register. */
+struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
+	u8 ASPMCtl[2];	/* DWORD 0 */
+	u8 rsvd0;		/* DWORD 0 */
+	u8 ReadCmplBndry;	/* DWORD 0 */
+	u8 LinkDisable;	/* DWORD 0 */
+	u8 RetrainLink;	/* DWORD 0 */
+	u8 CommonClkConfig;	/* DWORD 0 */
+	u8 ExtendSync;	/* DWORD 0 */
+	u8 rsvd1[8];	/* DWORD 0 */
+	u8 LinkSpeed[4];	/* DWORD 0 */
+	u8 NegLinkWidth[6];	/* DWORD 0 */
+	u8 LinkTrainErr;	/* DWORD 0 */
+	u8 LinkTrain;	/* DWORD 0 */
+	u8 SlotClkConfig;	/* DWORD 0 */
+	u8 rsvd2[3];	/* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express MSI Configuration Register. */
+struct BE_PCICFG_MSI_CSR_AMAP {
+	u8 capid[8];	/* DWORD 0 */
+	u8 nextptr[8];	/* DWORD 0 */
+	u8 tablesize[11];	/* DWORD 0 */
+	u8 rsvd0[3];	/* DWORD 0 */
+	u8 funcmask;	/* DWORD 0 */
+	u8 en;		/* DWORD 0 */
+} __packed;
+struct PCICFG_MSI_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* MSI-X Table Offset Register. */
+struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
+	u8 tablebir[3];	/* DWORD 0 */
+	u8 offset[29];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_TABLE_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* MSI-X PBA Offset Register. */
+struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
+	u8 pbabir[3];	/* DWORD 0 */
+	u8 offset[29];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_PBA_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Vector Control Register. */
+struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
+	u8 vector_control;	/* DWORD 0 */
+	u8 rsvd0[31];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Data Register. */
+struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
+	u8 data[16];	/* DWORD 0 */
+	u8 rsvd0[16];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Address Register - High Part. */
+struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
+	u8 addr[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
+	u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Address Register - Low Part. */
+struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
+	u8 rsvd0[2];	/* DWORD 0 */
+	u8 addr[30];	/* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_18_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_18_RSVD_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_19_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_19_RSVD_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_20_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[25][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_20_RSVD_AMAP {
+	u32 dw[26];
+};
+
+struct BE_PCICFG_ANON_21_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[1919][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_21_RSVD_AMAP {
+	u32 dw[1920];
+};
+
+struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
+	struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
+	struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
+	struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
+	struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
+} __packed;
+struct PCICFG_ANON_22_MESSAGE_AMAP {
+	u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_23_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[895][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_23_RSVD_AMAP {
+	u32 dw[896];
+};
+
+/* These PCI Configuration Space registers are for the Storage Function of
+ * BladeEngine (Function 0).
+ */
+struct BE_PCICFG0_CSRMAP_AMAP {
+	struct BE_PCICFG_ID_CSR_AMAP id;
+	u8 rsvd0[32];	/* DWORD 1 */
+	u8 rsvd1[32];	/* DWORD 2 */
+	u8 rsvd2[32];	/* DWORD 3 */
+	struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
+	struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
+	struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
+	struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
+	struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
+	struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
+	u8 rsvd3[32];	/* DWORD 10 */
+	struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
+	u8 rsvd4[32];	/* DWORD 12 */
+	u8 rsvd5[32];	/* DWORD 13 */
+	u8 rsvd6[32];	/* DWORD 14 */
+	u8 rsvd7[32];	/* DWORD 15 */
+	struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
+	struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+	u8 rsvd8[32];	/* DWORD 21 */
+	struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
+	u8 rsvd9[32];	/* DWORD 23 */
+	u8 rsvd10[32];	/* DWORD 24 */
+	u8 rsvd11[32];	/* DWORD 25 */
+	u8 rsvd12[32];	/* DWORD 26 */
+	u8 rsvd13[32];	/* DWORD 27 */
+	u8 rsvd14[2][32];	/* DWORD 28 */
+	u8 rsvd15[32];	/* DWORD 30 */
+	u8 rsvd16[32];	/* DWORD 31 */
+	u8 rsvd17[8][32];	/* DWORD 32 */
+	struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
+	struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
+	struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
+	struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
+	struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
+	struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
+	u8 rsvd18[32];	/* DWORD 46 */
+	u8 rsvd19[32];	/* DWORD 47 */
+	u8 rsvd20[32];	/* DWORD 48 */
+	u8 rsvd21[32];	/* DWORD 49 */
+	struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
+	u8 rsvd22[32];	/* DWORD 51 */
+	struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
+	struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
+	struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
+	struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
+	struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
+	struct BE_PCICFG_MSI_CSR_AMAP msi;
+	struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
+	struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
+	u8 rsvd23[32];	/* DWORD 60 */
+	u8 rsvd24[32];	/* DWORD 61 */
+	u8 rsvd25[32];	/* DWORD 62 */
+	u8 rsvd26[32];	/* DWORD 63 */
+	u8 rsvd27[32];	/* DWORD 64 */
+	u8 rsvd28[32];	/* DWORD 65 */
+	u8 rsvd29[32];	/* DWORD 66 */
+	u8 rsvd30[32];	/* DWORD 67 */
+	u8 rsvd31[32];	/* DWORD 68 */
+	u8 rsvd32[32];	/* DWORD 69 */
+	u8 rsvd33[32];	/* DWORD 70 */
+	u8 rsvd34[32];	/* DWORD 71 */
+	u8 rsvd35[32];	/* DWORD 72 */
+	u8 rsvd36[32];	/* DWORD 73 */
+	u8 rsvd37[32];	/* DWORD 74 */
+	u8 rsvd38[32];	/* DWORD 75 */
+	u8 rsvd39[32];	/* DWORD 76 */
+	u8 rsvd40[32];	/* DWORD 77 */
+	u8 rsvd41[32];	/* DWORD 78 */
+	u8 rsvd42[32];	/* DWORD 79 */
+	u8 rsvd43[32];	/* DWORD 80 */
+	u8 rsvd44[32];	/* DWORD 81 */
+	u8 rsvd45[32];	/* DWORD 82 */
+	u8 rsvd46[32];	/* DWORD 83 */
+	u8 rsvd47[32];	/* DWORD 84 */
+	u8 rsvd48[32];	/* DWORD 85 */
+	u8 rsvd49[32];	/* DWORD 86 */
+	u8 rsvd50[32];	/* DWORD 87 */
+	u8 rsvd51[32];	/* DWORD 88 */
+	u8 rsvd52[32];	/* DWORD 89 */
+	u8 rsvd53[32];	/* DWORD 90 */
+	u8 rsvd54[32];	/* DWORD 91 */
+	u8 rsvd55[32];	/* DWORD 92 */
+	u8 rsvd56[832];	/* DWORD 93 */
+	u8 rsvd57[32];	/* DWORD 119 */
+	u8 rsvd58[32];	/* DWORD 120 */
+	u8 rsvd59[32];	/* DWORD 121 */
+	u8 rsvd60[32];	/* DWORD 122 */
+	u8 rsvd61[32];	/* DWORD 123 */
+	u8 rsvd62[32];	/* DWORD 124 */
+	u8 rsvd63[32];	/* DWORD 125 */
+	u8 rsvd64[32];	/* DWORD 126 */
+	u8 rsvd65[32];	/* DWORD 127 */
+	u8 rsvd66[61440];	/* DWORD 128 */
+	struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
+	u8 rsvd67[28672];	/* DWORD 2176 */
+	u8 rsvd68[32];	/* DWORD 3072 */
+	u8 rsvd69[1023][32];	/* DWORD 3073 */
+} __packed;
+struct PCICFG0_CSRMAP_AMAP {
+	u32 dw[4096];
+};
+
+struct BE_PCICFG_ANON_24_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_24_RSVD_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_25_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_25_RSVD_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_26_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_26_RSVD_AMAP {
+	u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_27_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_27_RSVD_AMAP {
+	u32 dw[2];
+};
+
+struct BE_PCICFG_ANON_28_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[3][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_28_RSVD_AMAP {
+	u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_29_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[36][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_29_RSVD_AMAP {
+	u32 dw[37];
+};
+
+struct BE_PCICFG_ANON_30_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[1930][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_30_RSVD_AMAP {
+	u32 dw[1931];
+};
+
+struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
+	struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
+	struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
+	struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
+	struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
+} __packed;
+struct PCICFG_ANON_31_MESSAGE_AMAP {
+	u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_32_RSVD_AMAP {
+	u8 rsvd0[32];	/* DWORD 0 */
+	u8 rsvd1[895][32];	/* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_32_RSVD_AMAP {
+	u32 dw[896];
+};
+
+/* This PCI configuration space register map is for the  Networking Function of
+ * BladeEngine (Function 1).
+ */
+struct BE_PCICFG1_CSRMAP_AMAP {
+	struct BE_PCICFG_ID_CSR_AMAP id;
+	u8 rsvd0[32];	/* DWORD 1 */
+	u8 rsvd1[32];	/* DWORD 2 */
+	u8 rsvd2[32];	/* DWORD 3 */
+	struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
+	struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
+	struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
+	struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
+	struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
+	struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
+	u8 rsvd3[32];	/* DWORD 10 */
+	struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
+	u8 rsvd4[32];	/* DWORD 12 */
+	u8 rsvd5[32];	/* DWORD 13 */
+	u8 rsvd6[32];	/* DWORD 14 */
+	u8 rsvd7[32];	/* DWORD 15 */
+	struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
+	struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+	u8 rsvd8[32];	/* DWORD 21 */
+	struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
+	u8 rsvd9[32];	/* DWORD 23 */
+	u8 rsvd10[32];	/* DWORD 24 */
+	u8 rsvd11[32];	/* DWORD 25 */
+	u8 rsvd12[32];	/* DWORD 26 */
+	u8 rsvd13[32];	/* DWORD 27 */
+	u8 rsvd14[2][32];	/* DWORD 28 */
+	u8 rsvd15[32];	/* DWORD 30 */
+	u8 rsvd16[32];	/* DWORD 31 */
+	u8 rsvd17[8][32];	/* DWORD 32 */
+	struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
+	struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
+	struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
+	struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
+	struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
+	struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
+	u8 rsvd18[32];	/* DWORD 46 */
+	u8 rsvd19[32];	/* DWORD 47 */
+	u8 rsvd20[32];	/* DWORD 48 */
+	u8 rsvd21[32];	/* DWORD 49 */
+	struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
+	u8 rsvd22[32];	/* DWORD 51 */
+	struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
+	struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
+	struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
+	struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
+	struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
+	struct BE_PCICFG_MSI_CSR_AMAP msi;
+	struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
+	struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
+	u8 rsvd23[64];	/* DWORD 60 */
+	u8 rsvd24[32];	/* DWORD 62 */
+	u8 rsvd25[32];	/* DWORD 63 */
+	u8 rsvd26[32];	/* DWORD 64 */
+	u8 rsvd27[32];	/* DWORD 65 */
+	u8 rsvd28[32];	/* DWORD 66 */
+	u8 rsvd29[32];	/* DWORD 67 */
+	u8 rsvd30[32];	/* DWORD 68 */
+	u8 rsvd31[32];	/* DWORD 69 */
+	u8 rsvd32[32];	/* DWORD 70 */
+	u8 rsvd33[32];	/* DWORD 71 */
+	u8 rsvd34[32];	/* DWORD 72 */
+	u8 rsvd35[32];	/* DWORD 73 */
+	u8 rsvd36[32];	/* DWORD 74 */
+	u8 rsvd37[128];	/* DWORD 75 */
+	u8 rsvd38[32];	/* DWORD 79 */
+	u8 rsvd39[1184];	/* DWORD 80 */
+	u8 rsvd40[61792];	/* DWORD 117 */
+	struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
+	u8 rsvd41[28672];	/* DWORD 2176 */
+	u8 rsvd42[32];	/* DWORD 3072 */
+	u8 rsvd43[1023][32];	/* DWORD 3073 */
+} __packed;
+struct PCICFG1_CSRMAP_AMAP {
+	u32 dw[4096];
+};
+
+#endif /* __pcicfg_amap_h__ */

+ 111 - 0
drivers/staging/benet/post_codes.h

@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __post_codes_amap_h__
+#define __post_codes_amap_h__
+
+/* --- MGMT_HBA_POST_STAGE_ENUM --- */
+#define POST_STAGE_POWER_ON_RESET   (0)	/* State after a cold or warm boot. */
+#define POST_STAGE_AWAITING_HOST_RDY (1)	/* ARM boot code awaiting a
+						go-ahead from the host. */
+#define POST_STAGE_HOST_RDY (2)	/* Host has given the go-ahead to ARM. */
+#define POST_STAGE_BE_RESET (3)	/* Host wants to reset the chip; this
+						is a chip workaround. */
+#define POST_STAGE_SEEPROM_CS_START (256)	/* SEEPROM checksum
+						test start. */
+#define POST_STAGE_SEEPROM_CS_DONE  (257)	/* SEEPROM checksum test
+							done. */
+#define POST_STAGE_DDR_CONFIG_START (512)	/* DDR configuration start. */
+#define POST_STAGE_DDR_CONFIG_DONE  (513)	/* DDR configuration done. */
+#define POST_STAGE_DDR_CALIBRATE_START  (768)	/* DDR calibration start. */
+#define POST_STAGE_DDR_CALIBRATE_DONE   (769)	/* DDR calibration done. */
+#define POST_STAGE_DDR_TEST_START   (1024)	/* DDR memory test start. */
+#define POST_STAGE_DDR_TEST_DONE    (1025)	/* DDR memory test done. */
+#define POST_STAGE_REDBOOT_INIT_START   (1536)	/* Redboot starts execution. */
+#define POST_STAGE_REDBOOT_INIT_DONE (1537)	/* Redboot finished execution. */
+#define POST_STAGE_FW_IMAGE_LOAD_START (1792)	/* Firmware image load to
+							DDR start. */
+#define POST_STAGE_FW_IMAGE_LOAD_DONE   (1793)	/* Firmware image load
+							to DDR done. */
+#define POST_STAGE_ARMFW_START          (2048)	/* ARMfw runtime code
+						starts execution. */
+#define POST_STAGE_DHCP_QUERY_START     (2304)	/* DHCP server query start. */
+#define POST_STAGE_DHCP_QUERY_DONE      (2305)	/* DHCP server query done. */
+#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560)	/* Boot Target
+						Discovery Start. */
+#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561)	/* Boot Target
+						Discovery Done. */
+#define POST_STAGE_RC_OPTION_SET        (2816)	/* Remote configuration
+						option is set in SEEPROM. */
+#define POST_STAGE_SWITCH_LINK          (2817)	/* Wait for link up on switch */
+#define POST_STAGE_SEND_ICDS_MESSAGE    (2818)	/* Send the ICDS message
+						to switch */
+#define POST_STAGE_PERFROM_TFTP         (2819)	/* Download XML using TFTP */
+#define POST_STAGE_PARSE_XML            (2820)	/* Parse XML file */
+#define POST_STAGE_DOWNLOAD_IMAGE       (2821)	/* Download IMAGE from
+						TFTP server */
+#define POST_STAGE_FLASH_IMAGE          (2822)	/* Flash the IMAGE */
+#define POST_STAGE_RC_DONE              (2823)	/* Remote configuration
+						complete */
+#define POST_STAGE_REBOOT_SYSTEM        (2824)	/* Upgrade IMAGE done,
+						reboot required */
+#define POST_STAGE_MAC_ADDRESS          (3072)	/* MAC Address Check */
+#define POST_STAGE_ARMFW_READY          (49152)	/* ARMfw is done with POST
+						and ready. */
+#define POST_STAGE_ARMFW_UE             (61440)	/* ARMfw has asserted an
+						unrecoverable error. The
+						lower 3 hex digits of the
+						stage code identify the
+						unique error code.
+						*/
+
+/* This structure defines the format of the MPU semaphore
+ * register when used for POST.
+ */
+struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
+	u8 stage[16];	/* DWORD 0 */
+	u8 rsvd0[10];	/* DWORD 0 */
+	u8 iscsi_driver_loaded;	/* DWORD 0 */
+	u8 option_rom_installed;	/* DWORD 0 */
+	u8 iscsi_ip_conflict;	/* DWORD 0 */
+	u8 iscsi_no_ip;	/* DWORD 0 */
+	u8 backup_fw;	/* DWORD 0 */
+	u8 error;		/* DWORD 0 */
+} __packed;
+struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
+	u32 dw[1];
+};
+
+/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
+#define POST_BIT_ISCSI_LOADED           (26)
+#define POST_BIT_OPTROM_INST            (27)
+#define POST_BIT_BAD_IP_ADDR            (28)
+#define POST_BIT_NO_IP_ADDR             (29)
+#define POST_BIT_BACKUP_FW              (30)
+#define POST_BIT_ERROR                  (31)
+
+/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
+#define POST_ISCSI_DRIVER_LOADED        (67108864)
+#define POST_OPTROM_INSTALLED           (134217728)
+#define POST_ISCSI_IP_ADDRESS_CONFLICT  (268435456)
+#define POST_ISCSI_NO_IP_ADDRESS        (536870912)
+#define POST_BACKUP_FW_LOADED           (1073741824)
+#define POST_FATAL_ERROR                (2147483648)
+
+#endif /* __post_codes_amap_h__ */
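
Since the POST stage lives in the low 16 bits of the semaphore register (see the stage[16] field above) and the flag bits sit at positions 26..31, decoding a raw status word is just a couple of masks. A sketch with hypothetical helper names:

/* Hypothetical decode helpers for a raw POST status word read from
 * the MPU semaphore register; layout per the struct above. */
static inline u16 post_stage(u32 status)
{
	return status & 0xffff;		/* an MGMT_HBA_POST_STAGE_ENUM value */
}

static inline int post_fatal_error(u32 status)
{
	return (status & POST_FATAL_ERROR) != 0;	/* bit 31 set */
}
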

+ 68 - 0
drivers/staging/benet/regmap.h

@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __regmap_amap_h__
+#define __regmap_amap_h__
+#include "pcicfg.h"
+#include "ep.h"
+#include "cev.h"
+#include "mpu.h"
+#include "doorbells.h"
+
+/*
+ * This is the control and status register map for BladeEngine, showing
+ * the relative size and offset of each sub-module. The CSR registers
+ * are identical for the network and storage PCI functions. The
+ * CSR map is shown below, followed by details of each block,
+ * in sub-sections.  The sub-sections begin with a description
+ * of CSRs that are instantiated in multiple blocks.
+ */
+struct BE_BLADE_ENGINE_CSRMAP_AMAP {
+	struct BE_MPU_CSRMAP_AMAP mpu;
+	u8 rsvd0[8192];	/* DWORD 256 */
+	u8 rsvd1[8192];	/* DWORD 512 */
+	struct BE_CEV_CSRMAP_AMAP cev;
+	u8 rsvd2[8192];	/* DWORD 1024 */
+	u8 rsvd3[8192];	/* DWORD 1280 */
+	u8 rsvd4[8192];	/* DWORD 1536 */
+	u8 rsvd5[8192];	/* DWORD 1792 */
+	u8 rsvd6[8192];	/* DWORD 2048 */
+	u8 rsvd7[8192];	/* DWORD 2304 */
+	u8 rsvd8[8192];	/* DWORD 2560 */
+	u8 rsvd9[8192];	/* DWORD 2816 */
+	u8 rsvd10[8192];	/* DWORD 3072 */
+	u8 rsvd11[8192];	/* DWORD 3328 */
+	u8 rsvd12[8192];	/* DWORD 3584 */
+	u8 rsvd13[8192];	/* DWORD 3840 */
+	u8 rsvd14[8192];	/* DWORD 4096 */
+	u8 rsvd15[8192];	/* DWORD 4352 */
+	u8 rsvd16[8192];	/* DWORD 4608 */
+	u8 rsvd17[8192];	/* DWORD 4864 */
+	u8 rsvd18[8192];	/* DWORD 5120 */
+	u8 rsvd19[8192];	/* DWORD 5376 */
+	u8 rsvd20[8192];	/* DWORD 5632 */
+	u8 rsvd21[8192];	/* DWORD 5888 */
+	u8 rsvd22[8192];	/* DWORD 6144 */
+	u8 rsvd23[17152][32];	/* DWORD 6400 */
+} __packed;
+struct BLADE_ENGINE_CSRMAP_AMAP {
+	u32 dw[23552];
+};
+
+#endif /* __regmap_amap_h__ */

+ 27 - 0
drivers/staging/comedi/Kconfig

@@ -0,0 +1,27 @@
+config COMEDI
+	tristate "Data acquisition support (comedi)"
+	default n
+	---help---
+	  Enable support for a wide range of data acquisition devices
+	  for Linux.
+
+config COMEDI_RT
+	tristate "Comedi real-time support"
+	depends on COMEDI && RT
+	default n
+	---help---
+	  Enable real-time support in the Comedi core.
+
+config COMEDI_PCI_DRIVERS
+	tristate "Comedi PCI drivers"
+	depends on COMEDI && PCI
+	default n
+	---help---
+	  Enable building the comedi PCI drivers.
+
+config COMEDI_USB_DRIVERS
+	tristate "Comedi USB drivers"
+	depends on COMEDI && USB
+	default n
+	---help---
+	  Enable building the comedi USB drivers.

+ 17 - 0
drivers/staging/comedi/Makefile

@@ -0,0 +1,17 @@
+obj-$(CONFIG_COMEDI) += comedi.o
+obj-$(CONFIG_COMEDI_RT) += comedi_rt.o
+
+obj-$(CONFIG_COMEDI)	+= kcomedilib/
+obj-$(CONFIG_COMEDI)	+= drivers/
+
+comedi-objs :=		\
+	comedi_fops.o	\
+	proc.o		\
+	range.o		\
+	drivers.o	\
+	comedi_compat32.o \
+	comedi_ksyms.o
+
+comedi_rt-objs :=	\
+	rt_pend_tq.o	\
+	rt.o

+ 14 - 0
drivers/staging/comedi/TODO

@@ -0,0 +1,14 @@
+TODO:
+	- checkpatch.pl cleanups
+	- Lindent
+	- remove all wrappers
+	- remove typedefs
+	- audit userspace interface
+	- reserve major number
+	- cleanup the individual comedi drivers as well
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+copy:
+	Ian Abbott <abbotti@mev.co.uk>
+	Frank Mori Hess <fmhess@users.sourceforge.net>
+	David Schleef <ds@schleef.org>

+ 916 - 0
drivers/staging/comedi/comedi.h

@@ -0,0 +1,916 @@
+/*
+    include/comedi.h (installed as /usr/include/comedi.h)
+    header file for comedi
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU Lesser General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _COMEDI_H
+#define _COMEDI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define COMEDI_MAJORVERSION	0
+#define COMEDI_MINORVERSION	7
+#define COMEDI_MICROVERSION	76
+#define VERSION	"0.7.76"
+
+/* comedi's major device number */
+#define COMEDI_MAJOR 98
+
+/*
+   Maximum number of minor devices.  This can be increased, but since
+   kernel structures are currently statically allocated you don't want
+   it to be much larger than the number you actually use.
+ */
+#define COMEDI_NDEVICES 16
+
+/* number of config options in the config structure */
+#define COMEDI_NDEVCONFOPTS 32
+/* length of nth chunk of firmware data */
+#define COMEDI_DEVCONF_AUX_DATA3_LENGTH		25
+#define COMEDI_DEVCONF_AUX_DATA2_LENGTH		26
+#define COMEDI_DEVCONF_AUX_DATA1_LENGTH		27
+#define COMEDI_DEVCONF_AUX_DATA0_LENGTH		28
+#define COMEDI_DEVCONF_AUX_DATA_HI		29	/* most significant 32 bits of pointer address (if needed) */
+#define COMEDI_DEVCONF_AUX_DATA_LO		30	/* least significant 32 bits of pointer address */
+#define COMEDI_DEVCONF_AUX_DATA_LENGTH		31	/* total data length */
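
The HI/LO pair above exists so that a 64-bit pointer can ride in two 32-bit config options. A sketch of the packing side, using the options array from comedi_devconfig (defined later in this header); the helper itself is made up:

/* Hypothetical: stash a 64-bit address in the two 32-bit option slots. */
static inline void comedi_pack_aux_data_ptr(int options[COMEDI_NDEVCONFOPTS],
					    unsigned long long addr)
{
	options[COMEDI_DEVCONF_AUX_DATA_LO] = (int)(addr & 0xffffffffULL);
	options[COMEDI_DEVCONF_AUX_DATA_HI] = (int)(addr >> 32);
}
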
+
+/* max length of device and driver names */
+#define COMEDI_NAMELEN 20
+
+	typedef unsigned int lsampl_t;
+	typedef unsigned short sampl_t;
+
+/* packs and unpacks a channel/range number */
+
+#define CR_PACK(chan, rng, aref)		((((aref)&0x3)<<24) | (((rng)&0xff)<<16) | (chan))
+#define CR_PACK_FLAGS(chan, range, aref, flags)	(CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
+
+#define CR_CHAN(a)	((a)&0xffff)
+#define CR_RANGE(a)	(((a)>>16)&0xff)
+#define CR_AREF(a)	(((a)>>24)&0x03)
+
+#define CR_FLAGS_MASK	0xfc000000
+#define CR_ALT_FILTER	(1<<26)
+#define CR_DITHER	CR_ALT_FILTER
+#define CR_DEGLITCH	CR_ALT_FILTER
+#define CR_ALT_SOURCE	(1<<27)
+#define CR_EDGE		(1<<30)
+#define CR_INVERT	(1<<31)
+
+#define AREF_GROUND	0x00	/* analog ref = analog ground */
+#define AREF_COMMON	0x01	/* analog ref = analog common */
+#define AREF_DIFF	0x02	/* analog ref = differential */
+#define AREF_OTHER	0x03	/* analog ref = other (undefined) */
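
For illustration, a chanspec for channel 3, range 1, differential reference round-trips through these macros as follows (the function is illustrative only):

static inline void example_chanspec(void)
{
	unsigned int spec = CR_PACK(3, 1, AREF_DIFF);

	/* CR_CHAN(spec) == 3, CR_RANGE(spec) == 1,
	 * CR_AREF(spec) == AREF_DIFF */
	(void)spec;
}
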
+
+/* counters -- these are arbitrary values */
+#define GPCT_RESET		0x0001
+#define GPCT_SET_SOURCE		0x0002
+#define GPCT_SET_GATE		0x0004
+#define GPCT_SET_DIRECTION	0x0008
+#define GPCT_SET_OPERATION	0x0010
+#define GPCT_ARM		0x0020
+#define GPCT_DISARM		0x0040
+#define GPCT_GET_INT_CLK_FRQ	0x0080
+
+#define GPCT_INT_CLOCK		0x0001
+#define GPCT_EXT_PIN		0x0002
+#define GPCT_NO_GATE		0x0004
+#define GPCT_UP			0x0008
+#define GPCT_DOWN		0x0010
+#define GPCT_HWUD		0x0020
+#define GPCT_SIMPLE_EVENT	0x0040
+#define GPCT_SINGLE_PERIOD	0x0080
+#define GPCT_SINGLE_PW		0x0100
+#define GPCT_CONT_PULSE_OUT	0x0200
+#define GPCT_SINGLE_PULSE_OUT	0x0400
+
+/* instructions */
+
+#define INSN_MASK_WRITE		0x8000000
+#define INSN_MASK_READ		0x4000000
+#define INSN_MASK_SPECIAL	0x2000000
+
+#define INSN_READ		(0 | INSN_MASK_READ)
+#define INSN_WRITE		(1 | INSN_MASK_WRITE)
+#define INSN_BITS		(2 | INSN_MASK_READ|INSN_MASK_WRITE)
+#define INSN_CONFIG		(3 | INSN_MASK_READ|INSN_MASK_WRITE)
+#define INSN_GTOD		(4 | INSN_MASK_READ|INSN_MASK_SPECIAL)
+#define INSN_WAIT		(5 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
+#define INSN_INTTRIG		(6 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
+
+/* trigger flags */
+/* These flags are used in comedi_trig structures */
+
+#define TRIG_BOGUS	0x0001	/* do the motions */
+#define TRIG_DITHER	0x0002	/* enable dithering */
+#define TRIG_DEGLITCH	0x0004	/* enable deglitching */
+/*#define TRIG_RT       0x0008 */	/* perform op in real time */
+#define TRIG_CONFIG	0x0010	/* perform configuration, not triggering */
+#define TRIG_WAKE_EOS	0x0020	/* wake up on end-of-scan events */
+/*#define TRIG_WRITE    0x0040*/	/* write to bidirectional devices */
+
+/* command flags */
+/* These flags are used in comedi_cmd structures */
+
+#define CMDF_PRIORITY		0x00000008	/* try to use a real-time interrupt while performing command */
+
+#define TRIG_RT		CMDF_PRIORITY	/* compatibility definition */
+
+#define CMDF_WRITE		0x00000040
+#define TRIG_WRITE	CMDF_WRITE	/* compatibility definition */
+
+#define CMDF_RAWDATA		0x00000080
+
+#define COMEDI_EV_START		0x00040000
+#define COMEDI_EV_SCAN_BEGIN	0x00080000
+#define COMEDI_EV_CONVERT	0x00100000
+#define COMEDI_EV_SCAN_END	0x00200000
+#define COMEDI_EV_STOP		0x00400000
+
+#define TRIG_ROUND_MASK		0x00030000
+#define TRIG_ROUND_NEAREST	0x00000000
+#define TRIG_ROUND_DOWN		0x00010000
+#define TRIG_ROUND_UP		0x00020000
+#define TRIG_ROUND_UP_NEXT	0x00030000
+
+/* trigger sources */
+
+#define TRIG_ANY	0xffffffff
+#define TRIG_INVALID	0x00000000
+
+#define TRIG_NONE	0x00000001	/* never trigger */
+#define TRIG_NOW	0x00000002	/* trigger now + N ns */
+#define TRIG_FOLLOW	0x00000004	/* trigger on next lower level trig */
+#define TRIG_TIME	0x00000008	/* trigger at time N ns */
+#define TRIG_TIMER	0x00000010	/* trigger at rate N ns */
+#define TRIG_COUNT	0x00000020	/* trigger when count reaches N */
+#define TRIG_EXT	0x00000040	/* trigger on external signal N */
+#define TRIG_INT	0x00000080	/* trigger on comedi-internal signal N */
+#define TRIG_OTHER	0x00000100	/* driver defined */
+
+/* subdevice flags */
+
+#define SDF_BUSY	0x0001	/* device is busy */
+#define SDF_BUSY_OWNER	0x0002	/* device is busy with your job */
+#define SDF_LOCKED	0x0004	/* subdevice is locked */
+#define SDF_LOCK_OWNER	0x0008	/* you own lock */
+#define SDF_MAXDATA	0x0010	/* maxdata depends on channel */
+#define SDF_FLAGS	0x0020	/* flags depend on channel */
+#define SDF_RANGETYPE	0x0040	/* range type depends on channel */
+#define SDF_MODE0	0x0080	/* can do mode 0 */
+#define SDF_MODE1	0x0100	/* can do mode 1 */
+#define SDF_MODE2	0x0200	/* can do mode 2 */
+#define SDF_MODE3	0x0400	/* can do mode 3 */
+#define SDF_MODE4	0x0800	/* can do mode 4 */
+#define SDF_CMD		0x1000	/* can do commands (deprecated) */
+#define SDF_SOFT_CALIBRATED	0x2000	/* subdevice uses software calibration */
+#define SDF_CMD_WRITE		0x4000	/* can do output commands */
+#define SDF_CMD_READ		0x8000	/* can do input commands */
+
+#define SDF_READABLE	0x00010000	/* subdevice can be read (e.g. analog input) */
+#define SDF_WRITABLE	0x00020000	/* subdevice can be written (e.g. analog output) */
+#define SDF_WRITEABLE	SDF_WRITABLE	/* spelling error in API */
+#define SDF_INTERNAL	0x00040000	/* subdevice does not have externally visible lines */
+#define SDF_RT		0x00080000	/* DEPRECATED: subdevice is RT capable */
+#define SDF_GROUND	0x00100000	/* can do aref=ground */
+#define SDF_COMMON	0x00200000	/* can do aref=common */
+#define SDF_DIFF	0x00400000	/* can do aref=diff */
+#define SDF_OTHER	0x00800000	/* can do aref=other */
+#define SDF_DITHER	0x01000000	/* can do dithering */
+#define SDF_DEGLITCH	0x02000000	/* can do deglitching */
+#define SDF_MMAP	0x04000000	/* can do mmap() */
+#define SDF_RUNNING	0x08000000	/* subdevice is acquiring data */
+#define SDF_LSAMPL	0x10000000	/* subdevice uses 32-bit samples */
+#define SDF_PACKED	0x20000000	/* subdevice can do packed DIO */
+/* we recycle these flags for PWM */
+#define SDF_PWM_COUNTER SDF_MODE0       /* PWM can automatically switch off */
+#define SDF_PWM_HBRIDGE SDF_MODE1       /* PWM is signed (H-bridge) */
+
+/* subdevice types */
+
+enum comedi_subdevice_type {
+	COMEDI_SUBD_UNUSED,	/* unused by driver */
+	COMEDI_SUBD_AI,	/* analog input */
+	COMEDI_SUBD_AO,	/* analog output */
+	COMEDI_SUBD_DI,	/* digital input */
+	COMEDI_SUBD_DO,	/* digital output */
+	COMEDI_SUBD_DIO,	/* digital input/output */
+	COMEDI_SUBD_COUNTER,	/* counter */
+	COMEDI_SUBD_TIMER,	/* timer */
+	COMEDI_SUBD_MEMORY,	/* memory, EEPROM, DPRAM */
+	COMEDI_SUBD_CALIB,	/* calibration DACs */
+	COMEDI_SUBD_PROC,	/* processor, DSP */
+	COMEDI_SUBD_SERIAL,	/* serial IO */
+	COMEDI_SUBD_PWM         /* PWM */
+};
+
+/* configuration instructions */
+
+enum configuration_ids {
+	INSN_CONFIG_DIO_INPUT = 0,
+	INSN_CONFIG_DIO_OUTPUT = 1,
+	INSN_CONFIG_DIO_OPENDRAIN = 2,
+	INSN_CONFIG_ANALOG_TRIG = 16,
+/*	INSN_CONFIG_WAVEFORM = 17, */
+/*	INSN_CONFIG_TRIG = 18, */
+/*	INSN_CONFIG_COUNTER = 19, */
+	INSN_CONFIG_ALT_SOURCE = 20,
+	INSN_CONFIG_DIGITAL_TRIG = 21,
+	INSN_CONFIG_BLOCK_SIZE = 22,
+	INSN_CONFIG_TIMER_1 = 23,
+	INSN_CONFIG_FILTER = 24,
+	INSN_CONFIG_CHANGE_NOTIFY = 25,
+
+	INSN_CONFIG_SERIAL_CLOCK = 26,	/* ALPHA */
+	INSN_CONFIG_BIDIRECTIONAL_DATA = 27,
+	INSN_CONFIG_DIO_QUERY = 28,
+	INSN_CONFIG_PWM_OUTPUT = 29,
+	INSN_CONFIG_GET_PWM_OUTPUT = 30,
+	INSN_CONFIG_ARM = 31,
+	INSN_CONFIG_DISARM = 32,
+	INSN_CONFIG_GET_COUNTER_STATUS = 33,
+	INSN_CONFIG_RESET = 34,
+	INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,	/* Use CTR as single pulse generator */
+	INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,	/* Use CTR as pulse train generator */
+	INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,	/* Use the counter as encoder */
+	INSN_CONFIG_SET_GATE_SRC = 2001,	/* Set gate source */
+	INSN_CONFIG_GET_GATE_SRC = 2002,	/* Get gate source */
+	INSN_CONFIG_SET_CLOCK_SRC = 2003,	/* Set master clock source */
+	INSN_CONFIG_GET_CLOCK_SRC = 2004,	/* Get master clock source */
+	INSN_CONFIG_SET_OTHER_SRC = 2005,	/* Set other source */
+/*	INSN_CONFIG_GET_OTHER_SRC = 2006,*/	/* Get other source */
+	INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE,	/* Get size in bytes of
+						  subdevice's on-board fifos
+						  used during streaming
+						  input/output */
+	INSN_CONFIG_SET_COUNTER_MODE = 4097,
+	INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,	/* deprecated */
+	INSN_CONFIG_8254_READ_STATUS = 4098,
+	INSN_CONFIG_SET_ROUTING = 4099,
+	INSN_CONFIG_GET_ROUTING = 4109,
+/* PWM */
+	INSN_CONFIG_PWM_SET_PERIOD = 5000,   /* sets frequency */
+	INSN_CONFIG_PWM_GET_PERIOD = 5001,   /* gets frequency */
+	INSN_CONFIG_GET_PWM_STATUS = 5002,          /* is it running? */
+	INSN_CONFIG_PWM_SET_H_BRIDGE = 5003, /* sets H bridge: duty cycle and sign bit for a relay at the same time */
+	INSN_CONFIG_PWM_GET_H_BRIDGE = 5004  /* gets H bridge data: duty cycle and the sign bit */
+};
+
+enum comedi_io_direction {
+	COMEDI_INPUT = 0,
+	COMEDI_OUTPUT = 1,
+	COMEDI_OPENDRAIN = 2
+};
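
As an illustrative sketch of how a configuration instruction is assembled: data[0] carries the configuration id and, for INSN_CONFIG_DIO_QUERY, the direction comes back in data[1] as one of the comedi_io_direction values above. The comedi_insn type is declared further down this header; the subdevice and channel values here are made up.

static inline comedi_insn example_dio_query(unsigned int subdev,
					    unsigned int chan,
					    lsampl_t data[2])
{
	comedi_insn insn = {
		.insn = INSN_CONFIG,
		.n = 2,
		.data = data,
		.subdev = subdev,
		.chanspec = CR_PACK(chan, 0, 0),
	};

	data[0] = INSN_CONFIG_DIO_QUERY;	/* direction returned in data[1] */
	return insn;
}
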
+
+enum comedi_support_level {
+	COMEDI_UNKNOWN_SUPPORT = 0,
+	COMEDI_SUPPORTED,
+	COMEDI_UNSUPPORTED
+};
+
+/* ioctls */
+
+#define CIO 'd'
+#define COMEDI_DEVCONFIG _IOW(CIO, 0, comedi_devconfig)
+#define COMEDI_DEVINFO _IOR(CIO, 1, comedi_devinfo)
+#define COMEDI_SUBDINFO _IOR(CIO, 2, comedi_subdinfo)
+#define COMEDI_CHANINFO _IOR(CIO, 3, comedi_chaninfo)
+#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig)
+#define COMEDI_LOCK _IO(CIO, 5)
+#define COMEDI_UNLOCK _IO(CIO, 6)
+#define COMEDI_CANCEL _IO(CIO, 7)
+#define COMEDI_RANGEINFO _IOR(CIO, 8, comedi_rangeinfo)
+#define COMEDI_CMD _IOR(CIO, 9, comedi_cmd)
+#define COMEDI_CMDTEST _IOR(CIO, 10, comedi_cmd)
+#define COMEDI_INSNLIST _IOR(CIO, 11, comedi_insnlist)
+#define COMEDI_INSN _IOR(CIO, 12, comedi_insn)
+#define COMEDI_BUFCONFIG _IOR(CIO, 13, comedi_bufconfig)
+#define COMEDI_BUFINFO _IOWR(CIO, 14, comedi_bufinfo)
+#define COMEDI_POLL _IO(CIO, 15)
+
+/* structures */
+
+typedef struct comedi_trig_struct comedi_trig;
+typedef struct comedi_cmd_struct comedi_cmd;
+typedef struct comedi_insn_struct comedi_insn;
+typedef struct comedi_insnlist_struct comedi_insnlist;
+typedef struct comedi_chaninfo_struct comedi_chaninfo;
+typedef struct comedi_subdinfo_struct comedi_subdinfo;
+typedef struct comedi_devinfo_struct comedi_devinfo;
+typedef struct comedi_devconfig_struct comedi_devconfig;
+typedef struct comedi_rangeinfo_struct comedi_rangeinfo;
+typedef struct comedi_krange_struct comedi_krange;
+typedef struct comedi_bufconfig_struct comedi_bufconfig;
+typedef struct comedi_bufinfo_struct comedi_bufinfo;
+
+struct comedi_trig_struct {
+	unsigned int subdev;	/* subdevice */
+	unsigned int mode;	/* mode */
+	unsigned int flags;
+	unsigned int n_chan;	/* number of channels */
+	unsigned int *chanlist;	/* channel/range list */
+	sampl_t *data;	/* data list, size depends on subd flags */
+	unsigned int n;	/* number of scans */
+	unsigned int trigsrc;
+	unsigned int trigvar;
+	unsigned int trigvar1;
+	unsigned int data_len;
+	unsigned int unused[3];
+};
+
+struct comedi_insn_struct {
+	unsigned int insn;
+	unsigned int n;
+	lsampl_t *data;
+	unsigned int subdev;
+	unsigned int chanspec;
+	unsigned int unused[3];
+};
+
+struct comedi_insnlist_struct {
+	unsigned int n_insns;
+	comedi_insn *insns;
+};
+
+struct comedi_cmd_struct {
+	unsigned int subdev;
+	unsigned int flags;
+
+	unsigned int start_src;
+	unsigned int start_arg;
+
+	unsigned int scan_begin_src;
+	unsigned int scan_begin_arg;
+
+	unsigned int convert_src;
+	unsigned int convert_arg;
+
+	unsigned int scan_end_src;
+	unsigned int scan_end_arg;
+
+	unsigned int stop_src;
+	unsigned int stop_arg;
+
+	unsigned int *chanlist;	/* channel/range list */
+	unsigned int chanlist_len;
+
+	sampl_t *data;	/* data list, size depends on subd flags */
+	unsigned int data_len;
+};
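
To make the trigger sources above concrete, here is a hedged sketch of filling a streaming analog-input command with a 1 kHz scan rate; the subdevice number and timing values are invented for illustration:

static void example_ai_cmd(comedi_cmd *cmd, unsigned int *chanlist,
			   unsigned int n_chan)
{
	*cmd = (comedi_cmd){
		.subdev		= 0,		/* hypothetical AI subdevice */
		.start_src	= TRIG_NOW,	/* start immediately */
		.scan_begin_src	= TRIG_TIMER,	/* periodic scans... */
		.scan_begin_arg	= 1000000,	/* ...every 1,000,000 ns = 1 kHz */
		.convert_src	= TRIG_TIMER,
		.convert_arg	= 10000,	/* 10 us between conversions */
		.scan_end_src	= TRIG_COUNT,	/* a scan ends after... */
		.scan_end_arg	= n_chan,	/* ...one pass of the chanlist */
		.stop_src	= TRIG_NONE,	/* run until cancelled */
		.chanlist	= chanlist,
		.chanlist_len	= n_chan,
	};
}
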
+
+struct comedi_chaninfo_struct {
+	unsigned int subdev;
+	lsampl_t *maxdata_list;
+	unsigned int *flaglist;
+	unsigned int *rangelist;
+	unsigned int unused[4];
+};
+
+struct comedi_rangeinfo_struct {
+	unsigned int range_type;
+	void *range_ptr;
+};
+
+struct comedi_krange_struct {
+	int min;	/* fixed point, multiply by 1e-6 */
+	int max;	/* fixed point, multiply by 1e-6 */
+	unsigned int flags;
+};
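
Because min and max are fixed point scaled by 1e-6, converting a raw sample to a physical value is a linear map. A userspace-style sketch (kernel code would stay in fixed point; the helper is hypothetical):

static inline double krange_to_phys(const comedi_krange *kr,
				    lsampl_t raw, lsampl_t maxdata)
{
	double min = kr->min * 1e-6;	/* e.g. -10000000 -> -10.0 V */
	double max = kr->max * 1e-6;

	return min + (max - min) * (double)raw / (double)maxdata;
}
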
+
+
+struct comedi_subdinfo_struct {
+	unsigned int type;
+	unsigned int n_chan;
+	unsigned int subd_flags;
+	unsigned int timer_type;
+	unsigned int len_chanlist;
+	lsampl_t maxdata;
+	unsigned int flags;	/* channel flags */
+	unsigned int range_type;	/* lookup in kernel */
+	unsigned int settling_time_0;
+	unsigned insn_bits_support;	/* see support_level enum for values*/
+	unsigned int unused[8];
+};
+
+struct comedi_devinfo_struct {
+	unsigned int version_code;
+	unsigned int n_subdevs;
+	char driver_name[COMEDI_NAMELEN];
+	char board_name[COMEDI_NAMELEN];
+	int read_subdevice;
+	int write_subdevice;
+	int unused[30];
+};
+
+struct comedi_devconfig_struct {
+	char board_name[COMEDI_NAMELEN];
+	int options[COMEDI_NDEVCONFOPTS];
+};
+
+struct comedi_bufconfig_struct {
+	unsigned int subdevice;
+	unsigned int flags;
+
+	unsigned int maximum_size;
+	unsigned int size;
+
+	unsigned int unused[4];
+};
+
+struct comedi_bufinfo_struct {
+	unsigned int subdevice;
+	unsigned int bytes_read;
+
+	unsigned int buf_write_ptr;
+	unsigned int buf_read_ptr;
+	unsigned int buf_write_count;
+	unsigned int buf_read_count;
+
+	unsigned int bytes_written;
+
+	unsigned int unused[4];
+};
+
+/* range stuff */
+
+#define __RANGE(a, b)	((((a)&0xffff)<<16)|((b)&0xffff))
+
+#define RANGE_OFFSET(a)		(((a)>>16)&0xffff)
+#define RANGE_LENGTH(b)		((b)&0xffff)
+
+#define RF_UNIT(flags)		((flags)&0xff)
+#define RF_EXTERNAL		(1<<8)
+
+#define UNIT_volt		0
+#define UNIT_mA			1
+#define UNIT_none		2
+
+#define COMEDI_MIN_SPEED	((unsigned int)0xffffffff)
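
A quick round-trip through the range macros, for orientation (the function is illustrative only):

static inline void example_range_macros(void)
{
	unsigned int r = __RANGE(2, 5);
	unsigned int flags = UNIT_mA | RF_EXTERNAL;

	/* RANGE_OFFSET(r) == 2, RANGE_LENGTH(r) == 5,
	 * RF_UNIT(flags) == UNIT_mA */
	(void)r;
	(void)flags;
}
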
+
+/* callback stuff */
+/* only relevant to kernel modules. */
+
+#define COMEDI_CB_EOS		1	/* end of scan */
+#define COMEDI_CB_EOA		2	/* end of acquisition */
+#define COMEDI_CB_BLOCK		4	/* DEPRECATED: convenient block size */
+#define COMEDI_CB_EOBUF		8	/* DEPRECATED: end of buffer */
+#define COMEDI_CB_ERROR		16	/* card error during acquisition */
+#define COMEDI_CB_OVERFLOW	32	/* buffer overflow/underflow */
+
+/**********************************************************/
+/* everything after this line is ALPHA */
+/**********************************************************/
+
+/*
+  8254 specific configuration.
+
+  It supports two config commands:
+
+  0 ID: INSN_CONFIG_SET_COUNTER_MODE
+  1 8254 Mode
+    I8254_MODE0, I8254_MODE1, ..., I8254_MODE5
+    OR'ed with:
+    I8254_BCD, I8254_BINARY
+
+  0 ID: INSN_CONFIG_8254_READ_STATUS
+  1 <-- Status byte returned here.
+    B7 = Output
+    B6 = NULL Count
+    B5 - B0 Current mode.
+
+*/
+
+enum i8254_mode {
+	I8254_MODE0 = (0 << 1),	/* Interrupt on terminal count */
+	I8254_MODE1 = (1 << 1),	/* Hardware retriggerable one-shot */
+	I8254_MODE2 = (2 << 1),	/* Rate generator */
+	I8254_MODE3 = (3 << 1),	/* Square wave mode */
+	I8254_MODE4 = (4 << 1),	/* Software triggered strobe */
+	I8254_MODE5 = (5 << 1),	/* Hardware triggered strobe (retriggerable) */
+	I8254_BCD = 1,	/* use binary-coded decimal instead of binary (pretty useless) */
+	I8254_BINARY = 0
+};
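
Following the two-word layout described in the comment above, the data payload for a mode-set instruction would look like this (values chosen for illustration):

static inline void example_8254_set_mode(lsampl_t data[2])
{
	data[0] = INSN_CONFIG_SET_COUNTER_MODE;
	data[1] = I8254_MODE2 | I8254_BINARY;	/* rate generator, binary counting */
}
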
+
+static inline unsigned NI_USUAL_PFI_SELECT(unsigned pfi_channel)
+{
+	if (pfi_channel < 10)
+		return 0x1 + pfi_channel;
+	else
+		return 0xb + pfi_channel;
+}
+static inline unsigned NI_USUAL_RTSI_SELECT(unsigned rtsi_channel)
+{
+	if (rtsi_channel < 7)
+		return 0xb + rtsi_channel;
+	else
+		return 0x1b;
+}
+/* mode bits for NI general-purpose counters, set with
+ * INSN_CONFIG_SET_COUNTER_MODE */
+#define NI_GPCT_COUNTING_MODE_SHIFT 16
+#define NI_GPCT_INDEX_PHASE_BITSHIFT 20
+#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24
+enum ni_gpct_mode_bits {
+	NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4,
+	NI_GPCT_EDGE_GATE_MODE_MASK = 0x18,
+	NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0,
+	NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8,
+	NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10,
+	NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18,
+	NI_GPCT_STOP_MODE_MASK = 0x60,
+	NI_GPCT_STOP_ON_GATE_BITS = 0x00,
+	NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20,
+	NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40,
+	NI_GPCT_LOAD_B_SELECT_BIT = 0x80,
+	NI_GPCT_OUTPUT_MODE_MASK = 0x300,
+	NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100,
+	NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200,
+	NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300,
+	NI_GPCT_HARDWARE_DISARM_MASK = 0xc00,
+	NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000,
+	NI_GPCT_DISARM_AT_TC_BITS = 0x400,
+	NI_GPCT_DISARM_AT_GATE_BITS = 0x800,
+	NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00,
+	NI_GPCT_LOADING_ON_TC_BIT = 0x1000,
+	NI_GPCT_LOADING_ON_GATE_BIT = 0x4000,
+	NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_NORMAL_BITS =
+		0x0 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS =
+		0x1 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS =
+		0x2 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS =
+		0x3 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS =
+		0x4 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS =
+		0x6 << NI_GPCT_COUNTING_MODE_SHIFT,
+	NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+	NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS =
+		0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+	NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS =
+		0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+	NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS =
+		0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+	NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS =
+		0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
+	NI_GPCT_INDEX_ENABLE_BIT = 0x400000,
+	NI_GPCT_COUNTING_DIRECTION_MASK =
+		0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+	NI_GPCT_COUNTING_DIRECTION_DOWN_BITS =
+		0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+	NI_GPCT_COUNTING_DIRECTION_UP_BITS =
+		0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+	NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS =
+		0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+	NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS =
+		0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
+	NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000,
+	NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0,
+	NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000,
+	NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000,
+	NI_GPCT_OR_GATE_BIT = 0x10000000,
+	NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000
+};
+
+/* Bits for setting a clock source with
+ * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters. */
+enum ni_gpct_clock_source_bits {
+	NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f,
+	NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0,
+	NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1,
+	NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2,
+	NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
+	NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
+	NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
+	NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,	/* NI 660x-specific */
+	NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
+	NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
+	NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
+	NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
+	NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
+	NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000, /* divide source by 2 */
+	NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000, /* divide source by 8 */
+	NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
+};
+static inline unsigned NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(unsigned n)
+{
+	/* NI 660x-specific */
+	return 0x10 + n;
+}
+static inline unsigned NI_GPCT_RTSI_CLOCK_SRC_BITS(unsigned n)
+{
+	return 0x18 + n;
+}
+static inline unsigned NI_GPCT_PFI_CLOCK_SRC_BITS(unsigned n)
+{
+	/* no pfi on NI 660x */
+	return 0x20 + n;
+}
+
+/* Possibilities for setting a gate source with
+INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
+May be bitwise-or'd with CR_EDGE or CR_INVERT. */
+enum ni_gpct_gate_select {
+	/* m-series gates */
+	NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0,
+	NI_GPCT_AI_START2_GATE_SELECT = 0x12,
+	NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13,
+	NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14,
+	NI_GPCT_AI_START1_GATE_SELECT = 0x1c,
+	NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d,
+	NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e,
+	NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f,
+	/* more gates for 660x */
+	NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100,
+	NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101,
+	/* more gates for 660x "second gate" */
+	NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
+	NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
+	/* m-series "second gate" sources are unknown,
+	   we should add them here with an offset of 0x300 when known. */
+	NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
+};
+static inline unsigned NI_GPCT_GATE_PIN_GATE_SELECT(unsigned n)
+{
+	return 0x102 + n;
+}
+static inline unsigned NI_GPCT_RTSI_GATE_SELECT(unsigned n)
+{
+	return NI_USUAL_RTSI_SELECT(n);
+}
+static inline unsigned NI_GPCT_PFI_GATE_SELECT(unsigned n)
+{
+	return NI_USUAL_PFI_SELECT(n);
+}
+static inline unsigned NI_GPCT_UP_DOWN_PIN_GATE_SELECT(unsigned n)
+{
+	return 0x202 + n;
+}
+
+/* Possibilities for setting a source with
+INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters. */
+enum ni_gpct_other_index {
+	NI_GPCT_SOURCE_ENCODER_A,
+	NI_GPCT_SOURCE_ENCODER_B,
+	NI_GPCT_SOURCE_ENCODER_Z
+};
+enum ni_gpct_other_select {
+	/* m-series gates */
+	/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
+	NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
+};
+static inline unsigned NI_GPCT_PFI_OTHER_SELECT(unsigned n)
+{
+	return NI_USUAL_PFI_SELECT(n);
+}
+
+/* start sources for ni general-purpose counters for use with
+INSN_CONFIG_ARM */
+enum ni_gpct_arm_source {
+	NI_GPCT_ARM_IMMEDIATE = 0x0,
+	NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,	/* Start both the counter and
+						   the adjacent paired counter
+						   simultaneously */
+	/* NI doesn't document bits for selecting hardware arm triggers.  If
+	 * the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
+	 * significant bits (3 bits for 660x or 5 bits for m-series) through to
+	 * the hardware.  This will at least allow someone to figure out what
+	 * the bits do later. */
+	NI_GPCT_ARM_UNKNOWN = 0x1000,
+};
+
+/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
+enum ni_gpct_filter_select {
+	NI_GPCT_FILTER_OFF = 0x0,
+	NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1,
+	NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2,
+	NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3,
+	NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4,
+	NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5,
+	NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6
+};
+
+/* PFI digital filtering options for ni m-series for use with
+ * INSN_CONFIG_FILTER. */
+enum ni_pfi_filter_select {
+	NI_PFI_FILTER_OFF = 0x0,
+	NI_PFI_FILTER_125ns = 0x1,
+	NI_PFI_FILTER_6425ns = 0x2,
+	NI_PFI_FILTER_2550us = 0x3
+};
+
+/* master clock sources for ni mio boards and INSN_CONFIG_SET_CLOCK_SRC */
+enum ni_mio_clock_source {
+	NI_MIO_INTERNAL_CLOCK = 0,
+	NI_MIO_RTSI_CLOCK = 1,	/* doesn't work for m-series, use
+				   NI_MIO_PLL_RTSI_CLOCK() */
+	/* the NI_MIO_PLL_* sources are m-series only */
+	NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2,
+	NI_MIO_PLL_PXI10_CLOCK = 3,
+	NI_MIO_PLL_RTSI0_CLOCK = 4
+};
+static inline unsigned NI_MIO_PLL_RTSI_CLOCK(unsigned rtsi_channel)
+{
+	return NI_MIO_PLL_RTSI0_CLOCK + rtsi_channel;
+}
+
+/* Signals which can be routed to an NI RTSI pin with INSN_CONFIG_SET_ROUTING.
+ The numbers assigned are not arbitrary, they correspond to the bits required
+ to program the board. */
+enum ni_rtsi_routing {
+	NI_RTSI_OUTPUT_ADR_START1 = 0,
+	NI_RTSI_OUTPUT_ADR_START2 = 1,
+	NI_RTSI_OUTPUT_SCLKG = 2,
+	NI_RTSI_OUTPUT_DACUPDN = 3,
+	NI_RTSI_OUTPUT_DA_START1 = 4,
+	NI_RTSI_OUTPUT_G_SRC0 = 5,
+	NI_RTSI_OUTPUT_G_GATE0 = 6,
+	NI_RTSI_OUTPUT_RGOUT0 = 7,
+	NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
+	NI_RTSI_OUTPUT_RTSI_OSC = 12	/* pre-m-series always have RTSI clock
+					   on line 7 */
+};
+static inline unsigned NI_RTSI_OUTPUT_RTSI_BRD(unsigned n)
+{
+	return NI_RTSI_OUTPUT_RTSI_BRD_0 + n;
+}
+
+/* Signals which can be routed to an NI PFI pin on an m-series board with
+ * INSN_CONFIG_SET_ROUTING.  These numbers are also returned by
+ * INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their routing
+ * cannot be changed.  The numbers assigned are not arbitrary, they correspond
+ * to the bits required to program the board. */
+enum ni_pfi_routing {
+	NI_PFI_OUTPUT_PFI_DEFAULT = 0,
+	NI_PFI_OUTPUT_AI_START1 = 1,
+	NI_PFI_OUTPUT_AI_START2 = 2,
+	NI_PFI_OUTPUT_AI_CONVERT = 3,
+	NI_PFI_OUTPUT_G_SRC1 = 4,
+	NI_PFI_OUTPUT_G_GATE1 = 5,
+	NI_PFI_OUTPUT_AO_UPDATE_N = 6,
+	NI_PFI_OUTPUT_AO_START1 = 7,
+	NI_PFI_OUTPUT_AI_START_PULSE = 8,
+	NI_PFI_OUTPUT_G_SRC0 = 9,
+	NI_PFI_OUTPUT_G_GATE0 = 10,
+	NI_PFI_OUTPUT_EXT_STROBE = 11,
+	NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12,
+	NI_PFI_OUTPUT_GOUT0 = 13,
+	NI_PFI_OUTPUT_GOUT1 = 14,
+	NI_PFI_OUTPUT_FREQ_OUT = 15,
+	NI_PFI_OUTPUT_PFI_DO = 16,
+	NI_PFI_OUTPUT_I_ATRIG = 17,
+	NI_PFI_OUTPUT_RTSI0 = 18,
+	NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26,
+	NI_PFI_OUTPUT_SCXI_TRIG1 = 27,
+	NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28,
+	NI_PFI_OUTPUT_CDI_SAMPLE = 29,
+	NI_PFI_OUTPUT_CDO_UPDATE = 30
+};
+static inline unsigned NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel)
+{
+	return NI_PFI_OUTPUT_RTSI0 + rtsi_channel;
+}
+
+/* Signals which can be routed to output on a NI PFI pin on a 660x board
+ with INSN_CONFIG_SET_ROUTING.  The numbers assigned are
+ not arbitrary, they correspond to the bits required
+ to program the board.  Lines 0 to 7 can only be set to
+ NI_660X_PFI_OUTPUT_DIO.  Lines 32 to 39 can only be set to
+ NI_660X_PFI_OUTPUT_COUNTER. */
+enum ni_660x_pfi_routing {
+	NI_660X_PFI_OUTPUT_COUNTER = 1,	/* counter */
+	NI_660X_PFI_OUTPUT_DIO = 2,	/* static digital output */
+};
+
+/* NI External Trigger lines.  These values are not arbitrary, but are related
+ * to the bits required to program the board (offset by 1 for historical
+ * reasons). */
+static inline unsigned NI_EXT_PFI(unsigned pfi_channel)
+{
+	return NI_USUAL_PFI_SELECT(pfi_channel) - 1;
+}
+static inline unsigned NI_EXT_RTSI(unsigned rtsi_channel)
+{
+	return NI_USUAL_RTSI_SELECT(rtsi_channel) - 1;
+}
+
+/* status bits for INSN_CONFIG_GET_COUNTER_STATUS */
+enum comedi_counter_status_flags {
+	COMEDI_COUNTER_ARMED = 0x1,
+	COMEDI_COUNTER_COUNTING = 0x2,
+	COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
+};
+
+/* Clock sources for CDIO subdevice on NI m-series boards.  Used as the
+ * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
+ * with CR_INVERT to change polarity. */
+enum ni_m_series_cdio_scan_begin_src {
+	NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0,
+	NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18,
+	NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19,
+	NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20,
+	NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28,
+	NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29,
+	NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30,
+	NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31,
+	NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
+	NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
+};
+static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
+{
+	return NI_USUAL_PFI_SELECT(pfi_channel);
+}
+static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
+{
+	return NI_USUAL_RTSI_SELECT(rtsi_channel);
+}
+
+/* scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI
+ * boards.  These scan begin sources can also be bitwise-or'd with CR_INVERT to
+ * change polarity. */
+static inline unsigned NI_AO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
+{
+	return NI_USUAL_PFI_SELECT(pfi_channel);
+}
+static inline unsigned NI_AO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
+{
+	return NI_USUAL_RTSI_SELECT(rtsi_channel);
+}
+
+/* Bits for setting a clock source with
+ * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice. */
+enum ni_freq_out_clock_source_bits {
+	NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC,	/* 10 MHz */
+	NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	/* 100 KHz */
+};
+
+/* Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
+ * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
+enum amplc_dio_clock_source {
+	AMPLC_DIO_CLK_CLKN,	/* per channel external clock
+				   input/output pin (pin is only an
+				   input when clock source set to this
+				   value, otherwise it is an output) */
+	AMPLC_DIO_CLK_10MHZ,	/* 10 MHz internal clock */
+	AMPLC_DIO_CLK_1MHZ,	/* 1 MHz internal clock */
+	AMPLC_DIO_CLK_100KHZ,	/* 100 kHz internal clock */
+	AMPLC_DIO_CLK_10KHZ,	/* 10 kHz internal clock */
+	AMPLC_DIO_CLK_1KHZ,	/* 1 kHz internal clock */
+	AMPLC_DIO_CLK_OUTNM1,	/* output of the preceding counter channel
+				   (for channel 0, the preceding counter
+				   channel is channel 2 on the preceding
+				   counter subdevice; for the first counter
+				   subdevice, the preceding counter
+				   subdevice is the last counter
+				   subdevice) */
+	AMPLC_DIO_CLK_EXT	/* per chip external input pin */
+};
+
+/* Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for
+ * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
+enum amplc_dio_gate_source {
+	AMPLC_DIO_GAT_VCC,	/* internal high logic level */
+	AMPLC_DIO_GAT_GND,	/* internal low logic level */
+	AMPLC_DIO_GAT_GATN,	/* per channel external gate input */
+	AMPLC_DIO_GAT_NOUTNM2,	/* negated output of counter channel
+				   minus 2 (for channels 0 or 1, channel
+				   minus 2 is channel 1 or 2 on the
+				   preceding counter subdevice; for the
+				   first counter subdevice the preceding
+				   counter subdevice is the last counter
+				   subdevice) */
+	AMPLC_DIO_GAT_RESERVED4,
+	AMPLC_DIO_GAT_RESERVED5,
+	AMPLC_DIO_GAT_RESERVED6,
+	AMPLC_DIO_GAT_RESERVED7
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COMEDI_H */

+ 597 - 0
drivers/staging/comedi/comedi_compat32.c

@@ -0,0 +1,597 @@
+/*
+    comedi/comedi_compat32.c
+    32-bit ioctl compatibility for 64-bit comedi kernel module.
+
+    Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
+    Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#define __NO_VERSION__
+#include "comedi.h"
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+#include "comedi_compat32.h"
+
+#ifdef CONFIG_COMPAT
+
+#ifndef HAVE_COMPAT_IOCTL
+#include <linux/ioctl32.h>	/* for (un)register_ioctl32_conversion */
+#endif
+
+#define COMEDI32_CHANINFO _IOR(CIO, 3, comedi32_chaninfo)
+#define COMEDI32_RANGEINFO _IOR(CIO, 8, comedi32_rangeinfo)
+/* N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number. */
+#define COMEDI32_CMD _IOR(CIO, 9, comedi32_cmd)
+/* N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number. */
+#define COMEDI32_CMDTEST _IOR(CIO, 10, comedi32_cmd)
+#define COMEDI32_INSNLIST _IOR(CIO, 11, comedi32_insnlist)
+#define COMEDI32_INSN _IOR(CIO, 12, comedi32_insn)
+
+typedef struct comedi32_chaninfo_struct {
+	unsigned int subdev;
+	compat_uptr_t maxdata_list;	/* 32-bit 'lsampl_t *' */
+	compat_uptr_t flaglist;		/* 32-bit 'unsigned int *' */
+	compat_uptr_t rangelist;	/* 32-bit 'unsigned int *' */
+	unsigned int unused[4];
+} comedi32_chaninfo;
+
+typedef struct comedi32_rangeinfo_struct {
+	unsigned int range_type;
+	compat_uptr_t range_ptr;	/* 32-bit 'void *' */
+} comedi32_rangeinfo;
+
+typedef struct comedi32_cmd_struct {
+	unsigned int subdev;
+	unsigned int flags;
+	unsigned int start_src;
+	unsigned int start_arg;
+	unsigned int scan_begin_src;
+	unsigned int scan_begin_arg;
+	unsigned int convert_src;
+	unsigned int convert_arg;
+	unsigned int scan_end_src;
+	unsigned int scan_end_arg;
+	unsigned int stop_src;
+	unsigned int stop_arg;
+	compat_uptr_t chanlist;		/* 32-bit 'unsigned int *' */
+	unsigned int chanlist_len;
+	compat_uptr_t data;		/* 32-bit 'sampl_t *' */
+	unsigned int data_len;
+} comedi32_cmd;
+
+typedef struct comedi32_insn_struct {
+	unsigned int insn;
+	unsigned int n;
+	compat_uptr_t data;		/* 32-bit 'lsampl_t *' */
+	unsigned int subdev;
+	unsigned int chanspec;
+	unsigned int unused[3];
+} comedi32_insn;
+
+typedef struct comedi32_insnlist_struct {
+	unsigned int n_insns;
+	compat_uptr_t insns;		/* 32-bit 'comedi_insn *' */
+} comedi32_insnlist;
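
The helpers below all repeat one idiom: read a 32-bit compat_uptr_t from the old structure and widen it with compat_ptr() before storing it in the native structure. Distilled into a single helper (the function name is made up), the step looks like:

static int get_user_compat_ptr(void __user **dst, compat_uptr_t __user *src)
{
	compat_uptr_t uptr32;

	if (get_user(uptr32, src))
		return -EFAULT;
	*dst = compat_ptr(uptr32);	/* zero-extend to a native user pointer */
	return 0;
}
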
+
+/* Handle translated ioctl. */
+static int translated_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	if (!file->f_op) {
+		return -ENOTTY;
+	}
+#ifdef HAVE_UNLOCKED_IOCTL
+	if (file->f_op->unlocked_ioctl) {
+		int rc = (int)(*file->f_op->unlocked_ioctl)(file, cmd, arg);
+		if (rc == -ENOIOCTLCMD) {
+			rc = -ENOTTY;
+		}
+		return rc;
+	}
+#endif
+	if (file->f_op->ioctl) {
+		int rc;
+		lock_kernel();
+		rc = (*file->f_op->ioctl)(file->f_dentry->d_inode,
+				file, cmd, arg);
+		unlock_kernel();
+		return rc;
+	}
+	return -ENOTTY;
+}
+
+/* Handle 32-bit COMEDI_CHANINFO ioctl. */
+static int compat_chaninfo(struct file *file, unsigned long arg)
+{
+	comedi_chaninfo __user *chaninfo;
+	comedi32_chaninfo __user *chaninfo32;
+	int err;
+	union {
+		unsigned int uint;
+		compat_uptr_t uptr;
+	} temp;
+
+	chaninfo32 = compat_ptr(arg);
+	chaninfo = compat_alloc_user_space(sizeof(*chaninfo));
+
+	/* Copy chaninfo structure.  Ignore unused members. */
+	if (!access_ok(VERIFY_READ, chaninfo32, sizeof(*chaninfo32))
+			|| !access_ok(VERIFY_WRITE, chaninfo,
+				sizeof(*chaninfo))) {
+		return -EFAULT;
+	}
+	err = 0;
+	err |= __get_user(temp.uint, &chaninfo32->subdev);
+	err |= __put_user(temp.uint, &chaninfo->subdev);
+	err |= __get_user(temp.uptr, &chaninfo32->maxdata_list);
+	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->maxdata_list);
+	err |= __get_user(temp.uptr, &chaninfo32->flaglist);
+	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->flaglist);
+	err |= __get_user(temp.uptr, &chaninfo32->rangelist);
+	err |= __put_user(compat_ptr(temp.uptr), &chaninfo->rangelist);
+	if (err) {
+		return -EFAULT;
+	}
+
+	return translated_ioctl(file, COMEDI_CHANINFO, (unsigned long)chaninfo);
+}
+
+/* Handle 32-bit COMEDI_RANGEINFO ioctl. */
+static int compat_rangeinfo(struct file *file, unsigned long arg)
+{
+	comedi_rangeinfo __user *rangeinfo;
+	comedi32_rangeinfo __user *rangeinfo32;
+	int err;
+	union {
+		unsigned int uint;
+		compat_uptr_t uptr;
+	} temp;
+
+	rangeinfo32 = compat_ptr(arg);
+	rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo));
+
+	/* Copy rangeinfo structure. */
+	if (!access_ok(VERIFY_READ, rangeinfo32, sizeof(*rangeinfo32))
+			|| !access_ok(VERIFY_WRITE, rangeinfo,
+				sizeof(*rangeinfo))) {
+		return -EFAULT;
+	}
+	err = 0;
+	err |= __get_user(temp.uint, &rangeinfo32->range_type);
+	err |= __put_user(temp.uint, &rangeinfo->range_type);
+	err |= __get_user(temp.uptr, &rangeinfo32->range_ptr);
+	err |= __put_user(compat_ptr(temp.uptr), &rangeinfo->range_ptr);
+	if (err) {
+		return -EFAULT;
+	}
+
+	return translated_ioctl(file, COMEDI_RANGEINFO,
+			(unsigned long)rangeinfo);
+}
+
+/* Copy 32-bit cmd structure to native cmd structure. */
+static int get_compat_cmd(comedi_cmd __user *cmd,
+		comedi32_cmd __user *cmd32)
+{
+	int err;
+	union {
+		unsigned int uint;
+		compat_uptr_t uptr;
+	} temp;
+
+	/* Copy cmd structure. */
+	if (!access_ok(VERIFY_READ, cmd32, sizeof(*cmd32))
+			|| !access_ok(VERIFY_WRITE, cmd, sizeof(*cmd))) {
+		return -EFAULT;
+	}
+	err = 0;
+	err |= __get_user(temp.uint, &cmd32->subdev);
+	err |= __put_user(temp.uint, &cmd->subdev);
+	err |= __get_user(temp.uint, &cmd32->flags);
+	err |= __put_user(temp.uint, &cmd->flags);
+	err |= __get_user(temp.uint, &cmd32->start_src);
+	err |= __put_user(temp.uint, &cmd->start_src);
+	err |= __get_user(temp.uint, &cmd32->start_arg);
+	err |= __put_user(temp.uint, &cmd->start_arg);
+	err |= __get_user(temp.uint, &cmd32->scan_begin_src);
+	err |= __put_user(temp.uint, &cmd->scan_begin_src);
+	err |= __get_user(temp.uint, &cmd32->scan_begin_arg);
+	err |= __put_user(temp.uint, &cmd->scan_begin_arg);
+	err |= __get_user(temp.uint, &cmd32->convert_src);
+	err |= __put_user(temp.uint, &cmd->convert_src);
+	err |= __get_user(temp.uint, &cmd32->convert_arg);
+	err |= __put_user(temp.uint, &cmd->convert_arg);
+	err |= __get_user(temp.uint, &cmd32->scan_end_src);
+	err |= __put_user(temp.uint, &cmd->scan_end_src);
+	err |= __get_user(temp.uint, &cmd32->scan_end_arg);
+	err |= __put_user(temp.uint, &cmd->scan_end_arg);
+	err |= __get_user(temp.uint, &cmd32->stop_src);
+	err |= __put_user(temp.uint, &cmd->stop_src);
+	err |= __get_user(temp.uint, &cmd32->stop_arg);
+	err |= __put_user(temp.uint, &cmd->stop_arg);
+	err |= __get_user(temp.uptr, &cmd32->chanlist);
+	err |= __put_user(compat_ptr(temp.uptr), &cmd->chanlist);
+	err |= __get_user(temp.uint, &cmd32->chanlist_len);
+	err |= __put_user(temp.uint, &cmd->chanlist_len);
+	err |= __get_user(temp.uptr, &cmd32->data);
+	err |= __put_user(compat_ptr(temp.uptr), &cmd->data);
+	err |= __get_user(temp.uint, &cmd32->data_len);
+	err |= __put_user(temp.uint, &cmd->data_len);
+	return err ? -EFAULT : 0;
+}
+
+/* Copy native cmd structure to 32-bit cmd structure. */
+static int put_compat_cmd(comedi32_cmd __user *cmd32, comedi_cmd __user *cmd)
+{
+	int err;
+	unsigned int temp;
+
+	/* Copy back most of cmd structure. */
+	/* Assume the pointer values are already valid. */
+	/* (Could use ptr_to_compat() to set them, but that wasn't implemented
+	 * until kernel version 2.6.11.) */
+	if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd))
+			|| !access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32))) {
+		return -EFAULT;
+	}
+	err = 0;
+	err |= __get_user(temp, &cmd->subdev);
+	err |= __put_user(temp, &cmd32->subdev);
+	err |= __get_user(temp, &cmd->flags);
+	err |= __put_user(temp, &cmd32->flags);
+	err |= __get_user(temp, &cmd->start_src);
+	err |= __put_user(temp, &cmd32->start_src);
+	err |= __get_user(temp, &cmd->start_arg);
+	err |= __put_user(temp, &cmd32->start_arg);
+	err |= __get_user(temp, &cmd->scan_begin_src);
+	err |= __put_user(temp, &cmd32->scan_begin_src);
+	err |= __get_user(temp, &cmd->scan_begin_arg);
+	err |= __put_user(temp, &cmd32->scan_begin_arg);
+	err |= __get_user(temp, &cmd->convert_src);
+	err |= __put_user(temp, &cmd32->convert_src);
+	err |= __get_user(temp, &cmd->convert_arg);
+	err |= __put_user(temp, &cmd32->convert_arg);
+	err |= __get_user(temp, &cmd->scan_end_src);
+	err |= __put_user(temp, &cmd32->scan_end_src);
+	err |= __get_user(temp, &cmd->scan_end_arg);
+	err |= __put_user(temp, &cmd32->scan_end_arg);
+	err |= __get_user(temp, &cmd->stop_src);
+	err |= __put_user(temp, &cmd32->stop_src);
+	err |= __get_user(temp, &cmd->stop_arg);
+	err |= __put_user(temp, &cmd32->stop_arg);
+	/* Assume chanlist pointer is unchanged. */
+	err |= __get_user(temp, &cmd->chanlist_len);
+	err |= __put_user(temp, &cmd32->chanlist_len);
+	/* Assume data pointer is unchanged. */
+	err |= __get_user(temp, &cmd->data_len);
+	err |= __put_user(temp, &cmd32->data_len);
+	return err ? -EFAULT : 0;
+}
+
+/* Handle 32-bit COMEDI_CMD ioctl. */
+static int compat_cmd(struct file *file, unsigned long arg)
+{
+	comedi_cmd __user *cmd;
+	comedi32_cmd __user *cmd32;
+	int rc;
+
+	cmd32 = compat_ptr(arg);
+	cmd = compat_alloc_user_space(sizeof(*cmd));
+
+	rc = get_compat_cmd(cmd, cmd32);
+	if (rc) {
+		return rc;
+	}
+
+	return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
+}
+
+/* Handle 32-bit COMEDI_CMDTEST ioctl. */
+static int compat_cmdtest(struct file *file, unsigned long arg)
+{
+	comedi_cmd __user *cmd;
+	comedi32_cmd __user *cmd32;
+	int rc, err;
+
+	cmd32 = compat_ptr(arg);
+	cmd = compat_alloc_user_space(sizeof(*cmd));
+
+	rc = get_compat_cmd(cmd, cmd32);
+	if (rc) {
+		return rc;
+	}
+
+	rc = translated_ioctl(file, COMEDI_CMDTEST, (unsigned long)cmd);
+	if (rc < 0) {
+		return rc;
+	}
+
+	err = put_compat_cmd(cmd32, cmd);
+	if (err) {
+		rc = err;
+	}
+	return rc;
+}
+
+/* Copy 32-bit insn structure to native insn structure. */
+static int get_compat_insn(comedi_insn __user *insn,
+		comedi32_insn __user *insn32)
+{
+	int err;
+	union {
+		unsigned int uint;
+		compat_uptr_t uptr;
+	} temp;
+
+	/* Copy insn structure.  Ignore the unused members. */
+	err = 0;
+	if (!access_ok(VERIFY_READ, insn32, sizeof(*insn32))
+			|| !access_ok(VERIFY_WRITE, insn, sizeof(*insn))) {
+		return -EFAULT;
+	}
+	err |= __get_user(temp.uint, &insn32->insn);
+	err |= __put_user(temp.uint, &insn->insn);
+	err |= __get_user(temp.uint, &insn32->n);
+	err |= __put_user(temp.uint, &insn->n);
+	err |= __get_user(temp.uptr, &insn32->data);
+	err |= __put_user(compat_ptr(temp.uptr), &insn->data);
+	err |= __get_user(temp.uint, &insn32->subdev);
+	err |= __put_user(temp.uint, &insn->subdev);
+	err |= __get_user(temp.uint, &insn32->chanspec);
+	err |= __put_user(temp.uint, &insn->chanspec);
+	return err ? -EFAULT : 0;
+}
+
+/* Handle 32-bit COMEDI_INSNLIST ioctl. */
+static int compat_insnlist(struct file *file, unsigned long arg)
+{
+	struct combined_insnlist {
+		comedi_insnlist insnlist;
+		comedi_insn insn[1];
+	} __user *s;
+	comedi32_insnlist __user *insnlist32;
+	comedi32_insn __user *insn32;
+	compat_uptr_t uptr;
+	unsigned int n_insns, n;
+	int err, rc;
+
+	insnlist32 = compat_ptr(arg);
+
+	/* Get 32-bit insnlist structure. */
+	if (!access_ok(VERIFY_READ, insnlist32, sizeof(*insnlist32))) {
+		return -EFAULT;
+	}
+	err = 0;
+	err |= __get_user(n_insns, &insnlist32->n_insns);
+	err |= __get_user(uptr, &insnlist32->insns);
+	insn32 = compat_ptr(uptr);
+	if (err) {
+		return -EFAULT;
+	}
+
+	/* Allocate user memory to copy insnlist and insns into. */
+	s = compat_alloc_user_space(offsetof(struct combined_insnlist,
+				insn[n_insns]));
+
+	/* Set native insnlist structure. */
+	if (!access_ok(VERIFY_WRITE, &s->insnlist, sizeof(s->insnlist))) {
+		return -EFAULT;
+	}
+	err |= __put_user(n_insns, &s->insnlist.n_insns);
+	err |= __put_user(&s->insn[0], &s->insnlist.insns);
+	if (err) {
+		return -EFAULT;
+	}
+
+	/* Copy insn structures. */
+	for (n = 0; n < n_insns; n++) {
+		rc = get_compat_insn(&s->insn[n], &insn32[n]);
+		if (rc) {
+			return rc;
+		}
+	}
+
+	return translated_ioctl(file, COMEDI_INSNLIST,
+			(unsigned long)&s->insnlist);
+}
+
+/* Handle 32-bit COMEDI_INSN ioctl. */
+static int compat_insn(struct file *file, unsigned long arg)
+{
+	comedi_insn __user *insn;
+	comedi32_insn __user *insn32;
+	int rc;
+
+	insn32 = compat_ptr(arg);
+	insn = compat_alloc_user_space(sizeof(*insn));
+
+	rc = get_compat_insn(insn, insn32);
+	if (rc) {
+		return rc;
+	}
+
+	return translated_ioctl(file, COMEDI_INSN, (unsigned long)insn);
+}
+
+/* Process untranslated ioctl. */
+/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
+static inline int raw_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	int rc;
+
+	switch (cmd) {
+	case COMEDI_DEVCONFIG:
+	case COMEDI_DEVINFO:
+	case COMEDI_SUBDINFO:
+	case COMEDI_BUFCONFIG:
+	case COMEDI_BUFINFO:
+		/* Just need to translate the pointer argument. */
+		arg = (unsigned long)compat_ptr(arg);
+		rc = translated_ioctl(file, cmd, arg);
+		break;
+	case COMEDI_LOCK:
+	case COMEDI_UNLOCK:
+	case COMEDI_CANCEL:
+	case COMEDI_POLL:
+		/* No translation needed. */
+		rc = translated_ioctl(file, cmd, arg);
+		break;
+	case COMEDI32_CHANINFO:
+		rc = compat_chaninfo(file, arg);
+		break;
+	case COMEDI32_RANGEINFO:
+		rc = compat_rangeinfo(file, arg);
+		break;
+	case COMEDI32_CMD:
+		rc = compat_cmd(file, arg);
+		break;
+	case COMEDI32_CMDTEST:
+		rc = compat_cmdtest(file, arg);
+		break;
+	case COMEDI32_INSNLIST:
+		rc = compat_insnlist(file, arg);
+		break;
+	case COMEDI32_INSN:
+		rc = compat_insn(file, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+	return rc;
+}
+
+#ifdef HAVE_COMPAT_IOCTL	/* defined in <linux/fs.h> 2.6.11 onwards */
+
+/* compat_ioctl file operation. */
+/* Returns -ENOIOCTLCMD for unrecognised ioctl codes. */
+long comedi_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	return raw_ioctl(file, cmd, arg);
+}
+
+#else /* HAVE_COMPAT_IOCTL */
+
+/*
+ * Brain-dead ioctl compatibility for 2.6.10 and earlier.
+ *
+ * It's brain-dead because cmd numbers need to be unique system-wide!
+ * The comedi driver could end up attempting to execute ioctls for non-Comedi
+ * devices because it registered the system-wide cmd code first.  Similarly,
+ * another driver could end up attempting to execute ioctls for a Comedi
+ * device because it registered the cmd code first.  Chaos ensues.
+ */
+
+/* Handler for all 32-bit ioctl codes registered by this driver. */
+static int mapped_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg,
+		struct file *file)
+{
+	int rc;
+
+	/* Make sure we are dealing with a Comedi device. */
+	if (imajor(file->f_dentry->d_inode) != COMEDI_MAJOR) {
+		return -ENOTTY;
+	}
+	rc = raw_ioctl(file, cmd, arg);
+	/* Do not return -ENOIOCTLCMD. */
+	if (rc == -ENOIOCTLCMD) {
+		rc = -ENOTTY;
+	}
+	return rc;
+}
+
+struct ioctl32_map {
+	unsigned int cmd;
+	int (*handler)(unsigned int, unsigned int, unsigned long,
+			struct file *);
+	int registered;
+};
+
+static struct ioctl32_map comedi_ioctl32_map[] = {
+	{ COMEDI_DEVCONFIG, mapped_ioctl, 0 },
+	{ COMEDI_DEVINFO, mapped_ioctl, 0 },
+	{ COMEDI_SUBDINFO, mapped_ioctl, 0 },
+	{ COMEDI_BUFCONFIG, mapped_ioctl, 0 },
+	{ COMEDI_BUFINFO, mapped_ioctl, 0 },
+	{ COMEDI_LOCK, mapped_ioctl, 0 },
+	{ COMEDI_UNLOCK, mapped_ioctl, 0 },
+	{ COMEDI_CANCEL, mapped_ioctl, 0 },
+	{ COMEDI_POLL, mapped_ioctl, 0 },
+	{ COMEDI32_CHANINFO, mapped_ioctl, 0 },
+	{ COMEDI32_RANGEINFO, mapped_ioctl, 0 },
+	{ COMEDI32_CMD, mapped_ioctl, 0 },
+	{ COMEDI32_CMDTEST, mapped_ioctl, 0 },
+	{ COMEDI32_INSNLIST, mapped_ioctl, 0 },
+	{ COMEDI32_INSN, mapped_ioctl, 0 },
+};
+
+#define NUM_IOCTL32_MAPS ARRAY_SIZE(comedi_ioctl32_map)
+
+/* Register system-wide 32-bit ioctl handlers. */
+void comedi_register_ioctl32(void)
+{
+	int n, rc;
+
+	for (n = 0; n < NUM_IOCTL32_MAPS; n++) {
+		rc = register_ioctl32_conversion(comedi_ioctl32_map[n].cmd,
+				comedi_ioctl32_map[n].handler);
+		if (rc) {
+			printk(KERN_WARNING
+					"comedi: failed to register 32-bit "
+					"compatible ioctl handler for 0x%X - "
+					"expect bad things to happen!\n",
+					comedi_ioctl32_map[n].cmd);
+		}
+		comedi_ioctl32_map[n].registered = !rc;
+	}
+}
+
+/* Unregister system-wide 32-bit ioctl translations. */
+void comedi_unregister_ioctl32(void)
+{
+	int n, rc;
+
+	for (n = 0; n < NUM_IOCTL32_MAPS; n++) {
+		if (comedi_ioctl32_map[n].registered) {
+			rc = unregister_ioctl32_conversion(
+					comedi_ioctl32_map[n].cmd,
+					comedi_ioctl32_map[n].handler);
+			if (rc) {
+				printk(KERN_ERR
+					"comedi: failed to unregister 32-bit "
+					"compatible ioctl handler for 0x%X - "
+					"expect kernel Oops!\n",
+					comedi_ioctl32_map[n].cmd);
+			} else {
+				comedi_ioctl32_map[n].registered = 0;
+			}
+		}
+	}
+}
+
+#endif	/* HAVE_COMPAT_IOCTL */
+
+#endif	/* CONFIG_COMPAT */

+ 58 - 0
drivers/staging/comedi/comedi_compat32.h

@@ -0,0 +1,58 @@
+/*
+    comedi/comedi_compat32.h
+    32-bit ioctl compatibility for 64-bit comedi kernel module.
+
+    Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
+    Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _COMEDI_COMPAT32_H
+#define _COMEDI_COMPAT32_H
+
+#include <linux/compat.h>
+#include <linux/fs.h>	/* For HAVE_COMPAT_IOCTL and HAVE_UNLOCKED_IOCTL */
+
+#ifdef CONFIG_COMPAT
+
+#ifdef HAVE_COMPAT_IOCTL
+
+extern long comedi_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg);
+#define comedi_register_ioctl32() do {} while (0)
+#define comedi_unregister_ioctl32() do {} while (0)
+
+#else /* HAVE_COMPAT_IOCTL */
+
+#define comedi_compat_ioctl 0	/* NULL */
+extern void comedi_register_ioctl32(void);
+extern void comedi_unregister_ioctl32(void);
+
+#endif /* HAVE_COMPAT_IOCTL */
+
+#else /* CONFIG_COMPAT */
+
+#define comedi_compat_ioctl 0	/* NULL */
+#define comedi_register_ioctl32() do {} while (0)
+#define comedi_unregister_ioctl32() do {} while (0)
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* _COMEDI_COMPAT32_H */

+ 2244 - 0
drivers/staging/comedi/comedi_fops.c

@@ -0,0 +1,2244 @@
+/*
+    comedi/comedi_fops.c
+    comedi kernel module
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#undef DEBUG
+
+#define __NO_VERSION__
+#include "comedi_fops.h"
+#include "comedi_compat32.h"
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include "comedidev.h"
+#include <linux/cdev.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+/* #include "kvmem.h" */
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi core module");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_COMEDI_DEBUG
+int comedi_debug;
+module_param(comedi_debug, int, 0644);
+#endif
+
+static DEFINE_SPINLOCK(comedi_file_info_table_lock);
+static struct comedi_device_file_info
+    *comedi_file_info_table[COMEDI_NUM_MINORS];
+
+static int do_devconfig_ioctl(comedi_device *dev, comedi_devconfig *arg);
+static int do_bufconfig_ioctl(comedi_device *dev, void *arg);
+static int do_devinfo_ioctl(comedi_device *dev, comedi_devinfo *arg,
+			    struct file *file);
+static int do_subdinfo_ioctl(comedi_device *dev, comedi_subdinfo *arg,
+			     void *file);
+static int do_chaninfo_ioctl(comedi_device *dev, comedi_chaninfo *arg);
+static int do_bufinfo_ioctl(comedi_device *dev, void *arg);
+static int do_cmd_ioctl(comedi_device *dev, void *arg, void *file);
+static int do_lock_ioctl(comedi_device *dev, unsigned int arg, void *file);
+static int do_unlock_ioctl(comedi_device *dev, unsigned int arg, void *file);
+static int do_cancel_ioctl(comedi_device *dev, unsigned int arg, void *file);
+static int do_cmdtest_ioctl(comedi_device *dev, void *arg, void *file);
+static int do_insnlist_ioctl(comedi_device *dev, void *arg, void *file);
+static int do_insn_ioctl(comedi_device *dev, void *arg, void *file);
+static int do_poll_ioctl(comedi_device *dev, unsigned int subd, void *file);
+
+extern void do_become_nonbusy(comedi_device *dev, comedi_subdevice *s);
+static int do_cancel(comedi_device *dev, comedi_subdevice *s);
+
+static int comedi_fasync(int fd, struct file *file, int on);
+
+static int is_device_busy(comedi_device *dev);
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+#else
+static int comedi_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+#endif
+{
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+	int rc;
+
+	mutex_lock(&dev->mutex);
+
+	/* Device config is special, because it must work on
+	 * an unconfigured device. */
+	if (cmd == COMEDI_DEVCONFIG) {
+		rc = do_devconfig_ioctl(dev, (void *)arg);
+		goto done;
+	}
+
+	if (!dev->attached) {
+		DPRINTK("no driver configured on /dev/comedi%i\n", dev->minor);
+		rc = -ENODEV;
+		goto done;
+	}
+
+	switch (cmd) {
+	case COMEDI_BUFCONFIG:
+		rc = do_bufconfig_ioctl(dev, (void *)arg);
+		break;
+	case COMEDI_DEVINFO:
+		rc = do_devinfo_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_SUBDINFO:
+		rc = do_subdinfo_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_CHANINFO:
+		rc = do_chaninfo_ioctl(dev, (void *)arg);
+		break;
+	case COMEDI_RANGEINFO:
+		rc = do_rangeinfo_ioctl(dev, (void *)arg);
+		break;
+	case COMEDI_BUFINFO:
+		rc = do_bufinfo_ioctl(dev, (void *)arg);
+		break;
+	case COMEDI_LOCK:
+		rc = do_lock_ioctl(dev, arg, file);
+		break;
+	case COMEDI_UNLOCK:
+		rc = do_unlock_ioctl(dev, arg, file);
+		break;
+	case COMEDI_CANCEL:
+		rc = do_cancel_ioctl(dev, arg, file);
+		break;
+	case COMEDI_CMD:
+		rc = do_cmd_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_CMDTEST:
+		rc = do_cmdtest_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_INSNLIST:
+		rc = do_insnlist_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_INSN:
+		rc = do_insn_ioctl(dev, (void *)arg, file);
+		break;
+	case COMEDI_POLL:
+		rc = do_poll_ioctl(dev, arg, file);
+		break;
+	default:
+		rc = -ENOTTY;
+		break;
+	}
+
+done:
+	mutex_unlock(&dev->mutex);
+	return rc;
+}
+
+/*
+	COMEDI_DEVCONFIG
+	device config ioctl
+
+	arg:
+		pointer to devconfig structure
+
+	reads:
+		devconfig structure at arg
+
+	writes:
+		none
+*/
+static int do_devconfig_ioctl(comedi_device *dev, comedi_devconfig *arg)
+{
+	comedi_devconfig it;
+	int ret;
+	unsigned char *aux_data = NULL;
+	int aux_len;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (arg == NULL) {
+		if (is_device_busy(dev))
+			return -EBUSY;
+		if (dev->attached) {
+			struct module *driver_module = dev->driver->module;
+			comedi_device_detach(dev);
+			module_put(driver_module);
+		}
+		return 0;
+	}
+
+	if (copy_from_user(&it, arg, sizeof(comedi_devconfig)))
+		return -EFAULT;
+
+	it.board_name[COMEDI_NAMELEN - 1] = 0;
+
+	if (comedi_aux_data(it.options, 0) &&
+	    it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
+		int bit_shift;
+		aux_len = it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH];
+		if (aux_len < 0)
+			return -EFAULT;
+
+		aux_data = vmalloc(aux_len);
+		if (!aux_data)
+			return -ENOMEM;
+
+		if (copy_from_user(aux_data,
+				   comedi_aux_data(it.options, 0), aux_len)) {
+			vfree(aux_data);
+			return -EFAULT;
+		}
+		it.options[COMEDI_DEVCONF_AUX_DATA_LO] =
+		    (unsigned long)aux_data;
+		if (sizeof(void *) > sizeof(int)) {
+			bit_shift = sizeof(int) * 8;
+			it.options[COMEDI_DEVCONF_AUX_DATA_HI] =
+			    ((unsigned long)aux_data) >> bit_shift;
+		} else
+			it.options[COMEDI_DEVCONF_AUX_DATA_HI] = 0;
+	}
+
+	ret = comedi_device_attach(dev, &it);
+	if (ret == 0) {
+		if (!try_module_get(dev->driver->module)) {
+			comedi_device_detach(dev);
+			return -ENOSYS;
+		}
+	}
+
+	if (aux_data)
+		vfree(aux_data);
+
+	return ret;
+}
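+
+/*
+ * Illustrative userspace sketch for the COMEDI_DEVCONFIG ioctl above --
+ * a minimal attach/detach, assuming an fd already open on /dev/comedi0
+ * and the usual userspace comedi headers; "comedi_test" is just an
+ * example driver name:
+ *
+ *	comedi_devconfig cfg;
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	strncpy(cfg.board_name, "comedi_test", COMEDI_NAMELEN - 1);
+ *	if (ioctl(fd, COMEDI_DEVCONFIG, &cfg) < 0)
+ *		perror("COMEDI_DEVCONFIG");
+ *	ioctl(fd, COMEDI_DEVCONFIG, NULL);	-- NULL arg detaches again
+ */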
+
+/*
+	COMEDI_BUFCONFIG
+	buffer configuration ioctl
+
+	arg:
+		pointer to bufconfig structure
+
+	reads:
+		bufconfig at arg
+
+	writes:
+		modified bufconfig at arg
+
+*/
+static int do_bufconfig_ioctl(comedi_device *dev, void *arg)
+{
+	comedi_bufconfig bc;
+	comedi_async *async;
+	comedi_subdevice *s;
+	int ret = 0;
+
+	if (copy_from_user(&bc, arg, sizeof(comedi_bufconfig)))
+		return -EFAULT;
+
+	if (bc.subdevice >= dev->n_subdevices || bc.subdevice < 0)
+		return -EINVAL;
+
+	s = dev->subdevices + bc.subdevice;
+	async = s->async;
+
+	if (!async) {
+		DPRINTK("subdevice does not have async capability\n");
+		bc.size = 0;
+		bc.maximum_size = 0;
+		goto copyback;
+	}
+
+	if (bc.maximum_size) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		async->max_bufsize = bc.maximum_size;
+	}
+
+	if (bc.size) {
+		if (bc.size > async->max_bufsize)
+			return -EPERM;
+
+		if (s->busy) {
+			DPRINTK("subdevice is busy, cannot resize buffer\n");
+			return -EBUSY;
+		}
+		if (async->mmap_count) {
+			DPRINTK("subdevice is mmapped, cannot resize buffer\n");
+			return -EBUSY;
+		}
+
+		if (!async->prealloc_buf)
+			return -EINVAL;
+
+		/* make sure buffer is an integral number of pages
+		 * (we round up) */
+		bc.size = (bc.size + PAGE_SIZE - 1) & PAGE_MASK;
+
+		ret = comedi_buf_alloc(dev, s, bc.size);
+		if (ret < 0)
+			return ret;
+
+		if (s->buf_change) {
+			ret = s->buf_change(dev, s, bc.size);
+			if (ret < 0)
+				return ret;
+		}
+
+		DPRINTK("comedi%i subd %d buffer resized to %i bytes\n",
+			dev->minor, bc.subdevice, async->prealloc_bufsz);
+	}
+
+	bc.size = async->prealloc_bufsz;
+	bc.maximum_size = async->max_bufsize;
+
+copyback:
+	if (copy_to_user(arg, &bc, sizeof(comedi_bufconfig)))
+		return -EFAULT;
+
+	return 0;
+}
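+
+/*
+ * Illustrative userspace sketch for COMEDI_BUFCONFIG -- resize the
+ * streaming buffer of subdevice 0 (a sketch, assuming an open fd; the
+ * requested size is rounded up to whole pages by the handler above):
+ *
+ *	comedi_bufconfig bc;
+ *	memset(&bc, 0, sizeof(bc));
+ *	bc.subdevice = 0;
+ *	bc.size = 64 * 1024;
+ *	if (ioctl(fd, COMEDI_BUFCONFIG, &bc) == 0)
+ *		printf("buffer is now %u bytes\n", bc.size);
+ */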
+
+/*
+	COMEDI_DEVINFO
+	device info ioctl
+
+	arg:
+		pointer to devinfo structure
+
+	reads:
+		none
+
+	writes:
+		devinfo structure
+
+*/
+static int do_devinfo_ioctl(comedi_device *dev, comedi_devinfo *arg,
+			    struct file *file)
+{
+	comedi_devinfo devinfo;
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_subdevice *read_subdev =
+	    comedi_get_read_subdevice(dev_file_info);
+	comedi_subdevice *write_subdev =
+	    comedi_get_write_subdevice(dev_file_info);
+
+	memset(&devinfo, 0, sizeof(devinfo));
+
+	/* fill devinfo structure */
+	devinfo.version_code = COMEDI_VERSION_CODE;
+	devinfo.n_subdevs = dev->n_subdevices;
+	memcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
+	memcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
+
+	if (read_subdev)
+		devinfo.read_subdevice = read_subdev - dev->subdevices;
+	else
+		devinfo.read_subdevice = -1;
+
+	if (write_subdev)
+		devinfo.write_subdevice = write_subdev - dev->subdevices;
+	else
+		devinfo.write_subdevice = -1;
+
+	if (copy_to_user(arg, &devinfo, sizeof(comedi_devinfo)))
+		return -EFAULT;
+
+	return 0;
+}
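+
+/*
+ * Illustrative userspace sketch for COMEDI_DEVINFO (assumes an open fd):
+ *
+ *	comedi_devinfo info;
+ *	if (ioctl(fd, COMEDI_DEVINFO, &info) == 0)
+ *		printf("driver %s, board %s, %d subdevices\n",
+ *		       info.driver_name, info.board_name, info.n_subdevs);
+ */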
+
+/*
+	COMEDI_SUBDINFO
+	subdevice info ioctl
+
+	arg:
+		pointer to array of subdevice info structures
+
+	reads:
+		none
+
+	writes:
+		array of subdevice info structures at arg
+
+*/
+static int do_subdinfo_ioctl(comedi_device *dev, comedi_subdinfo *arg,
+			     void *file)
+{
+	int ret, i;
+	comedi_subdinfo *tmp, *us;
+	comedi_subdevice *s;
+
+	tmp = kcalloc(dev->n_subdevices, sizeof(comedi_subdinfo), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	/* fill subdinfo structs */
+	for (i = 0; i < dev->n_subdevices; i++) {
+		s = dev->subdevices + i;
+		us = tmp + i;
+
+		us->type = s->type;
+		us->n_chan = s->n_chan;
+		us->subd_flags = s->subdev_flags;
+		if (comedi_get_subdevice_runflags(s) & SRF_RUNNING)
+			us->subd_flags |= SDF_RUNNING;
+#define TIMER_nanosec 5		/* backwards compatibility */
+		us->timer_type = TIMER_nanosec;
+		us->len_chanlist = s->len_chanlist;
+		us->maxdata = s->maxdata;
+		if (s->range_table) {
+			us->range_type =
+			    (i << 24) | (0 << 16) | (s->range_table->length);
+		} else {
+			us->range_type = 0;	/* XXX */
+		}
+		us->flags = s->flags;
+
+		if (s->busy)
+			us->subd_flags |= SDF_BUSY;
+		if (s->busy == file)
+			us->subd_flags |= SDF_BUSY_OWNER;
+		if (s->lock)
+			us->subd_flags |= SDF_LOCKED;
+		if (s->lock == file)
+			us->subd_flags |= SDF_LOCK_OWNER;
+		if (!s->maxdata && s->maxdata_list)
+			us->subd_flags |= SDF_MAXDATA;
+		if (s->flaglist)
+			us->subd_flags |= SDF_FLAGS;
+		if (s->range_table_list)
+			us->subd_flags |= SDF_RANGETYPE;
+		if (s->do_cmd)
+			us->subd_flags |= SDF_CMD;
+
+		if (s->insn_bits != &insn_inval)
+			us->insn_bits_support = COMEDI_SUPPORTED;
+		else
+			us->insn_bits_support = COMEDI_UNSUPPORTED;
+
+		us->settling_time_0 = s->settling_time_0;
+	}
+
+	ret = copy_to_user(arg, tmp,
+			   dev->n_subdevices * sizeof(comedi_subdinfo));
+
+	kfree(tmp);
+
+	return ret ? -EFAULT : 0;
+}
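+
+/*
+ * Illustrative userspace sketch for COMEDI_SUBDINFO -- the caller must
+ * size the array from a prior COMEDI_DEVINFO (a sketch, reusing the
+ * "info" variable from the previous example):
+ *
+ *	comedi_subdinfo *si = calloc(info.n_subdevs, sizeof(*si));
+ *	if (si && ioctl(fd, COMEDI_SUBDINFO, si) == 0)
+ *		printf("subdevice 0: type %u, %u channels\n",
+ *		       si[0].type, si[0].n_chan);
+ */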
+
+/*
+	COMEDI_CHANINFO
+	subdevice info ioctl
+
+	arg:
+		pointer to chaninfo structure
+
+	reads:
+		chaninfo structure at arg
+
+	writes:
+		arrays at elements of chaninfo structure
+
+*/
+static int do_chaninfo_ioctl(comedi_device *dev, comedi_chaninfo *arg)
+{
+	comedi_subdevice *s;
+	comedi_chaninfo it;
+
+	if (copy_from_user(&it, arg, sizeof(comedi_chaninfo)))
+		return -EFAULT;
+
+	if (it.subdev >= dev->n_subdevices)
+		return -EINVAL;
+	s = dev->subdevices + it.subdev;
+
+	if (it.maxdata_list) {
+		if (s->maxdata || !s->maxdata_list)
+			return -EINVAL;
+		if (copy_to_user(it.maxdata_list, s->maxdata_list,
+				 s->n_chan * sizeof(lsampl_t)))
+			return -EFAULT;
+	}
+
+	if (it.flaglist) {
+		if (!s->flaglist)
+			return -EINVAL;
+		if (copy_to_user(it.flaglist, s->flaglist,
+				 s->n_chan * sizeof(unsigned int)))
+			return -EFAULT;
+	}
+
+	if (it.rangelist) {
+		int i;
+
+		if (!s->range_table_list)
+			return -EINVAL;
+		for (i = 0; i < s->n_chan; i++) {
+			int x;
+
+			x = (dev->minor << 28) | (it.subdev << 24) | (i << 16) |
+			    (s->range_table_list[i]->length);
+			if (put_user(x, it.rangelist + i))
+				return -EFAULT;
+		}
+#if 0
+		if (copy_to_user(it.rangelist, s->range_type_list,
+				 s->n_chan*sizeof(unsigned int)))
+			return -EFAULT;
+#endif
+	}
+
+	return 0;
+}
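+
+/*
+ * Illustrative userspace sketch for COMEDI_CHANINFO -- fetch per-channel
+ * maxdata values (only valid when the subdevice has a maxdata list, as
+ * checked above; n_chan is assumed known from COMEDI_SUBDINFO):
+ *
+ *	comedi_chaninfo ci;
+ *	lsampl_t maxdata[n_chan];
+ *	memset(&ci, 0, sizeof(ci));
+ *	ci.subdev = 0;
+ *	ci.maxdata_list = maxdata;
+ *	ioctl(fd, COMEDI_CHANINFO, &ci);
+ */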
+
+/*
+	COMEDI_BUFINFO
+	buffer information ioctl
+
+	arg:
+		pointer to bufinfo structure
+
+	reads:
+		bufinfo at arg
+
+	writes:
+		modified bufinfo at arg
+
+*/
+static int do_bufinfo_ioctl(comedi_device *dev, void *arg)
+{
+	comedi_bufinfo bi;
+	comedi_subdevice *s;
+	comedi_async *async;
+
+	if (copy_from_user(&bi, arg, sizeof(comedi_bufinfo)))
+		return -EFAULT;
+
+	if (bi.subdevice >= dev->n_subdevices || bi.subdevice < 0)
+		return -EINVAL;
+
+	s = dev->subdevices + bi.subdevice;
+	async = s->async;
+
+	if (!async) {
+		DPRINTK("subdevice does not have async capability\n");
+		bi.buf_write_ptr = 0;
+		bi.buf_read_ptr = 0;
+		bi.buf_write_count = 0;
+		bi.buf_read_count = 0;
+		goto copyback;
+	}
+
+	if (bi.bytes_read && (s->subdev_flags & SDF_CMD_READ)) {
+		bi.bytes_read = comedi_buf_read_alloc(async, bi.bytes_read);
+		comedi_buf_read_free(async, bi.bytes_read);
+
+		if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR |
+							  SRF_RUNNING))
+		    && async->buf_write_count == async->buf_read_count) {
+			do_become_nonbusy(dev, s);
+		}
+	}
+
+	if (bi.bytes_written && (s->subdev_flags & SDF_CMD_WRITE)) {
+		bi.bytes_written =
+		    comedi_buf_write_alloc(async, bi.bytes_written);
+		comedi_buf_write_free(async, bi.bytes_written);
+	}
+
+	bi.buf_write_count = async->buf_write_count;
+	bi.buf_write_ptr = async->buf_write_ptr;
+	bi.buf_read_count = async->buf_read_count;
+	bi.buf_read_ptr = async->buf_read_ptr;
+
+copyback:
+	if (copy_to_user(arg, &bi, sizeof(comedi_bufinfo)))
+		return -EFAULT;
+
+	return 0;
+}
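+
+/*
+ * Illustrative userspace sketch for COMEDI_BUFINFO -- tell the core that
+ * n bytes were consumed from an mmap()ed read buffer and read back the
+ * current buffer pointers (a sketch, assuming an open fd and a running
+ * streaming acquisition on subdevice 0):
+ *
+ *	comedi_bufinfo bi;
+ *	memset(&bi, 0, sizeof(bi));
+ *	bi.subdevice = 0;
+ *	bi.bytes_read = n;
+ *	if (ioctl(fd, COMEDI_BUFINFO, &bi) == 0)
+ *		printf("write count %u, read count %u\n",
+ *		       bi.buf_write_count, bi.buf_read_count);
+ */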
+
+static int parse_insn(comedi_device *dev, comedi_insn *insn, lsampl_t *data,
+		      void *file);
+/*
+ * 	COMEDI_INSNLIST
+ * 	synchronous instructions
+ *
+ * 	arg:
+ * 		pointer to sync cmd structure
+ *
+ * 	reads:
+ * 		sync cmd struct at arg
+ * 		instruction list
+ * 		data (for writes)
+ *
+ * 	writes:
+ * 		data (for reads)
+ */
+/* arbitrary limits */
+#define MAX_SAMPLES 256
+static int do_insnlist_ioctl(comedi_device *dev, void *arg, void *file)
+{
+	comedi_insnlist insnlist;
+	comedi_insn *insns = NULL;
+	lsampl_t *data = NULL;
+	int i = 0;
+	int ret = 0;
+
+	if (copy_from_user(&insnlist, arg, sizeof(comedi_insnlist)))
+		return -EFAULT;
+
+	data = kmalloc(sizeof(lsampl_t) * MAX_SAMPLES, GFP_KERNEL);
+	if (!data) {
+		DPRINTK("kmalloc failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	insns = kmalloc(sizeof(comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+	if (!insns) {
+		DPRINTK("kmalloc failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	if (copy_from_user(insns, insnlist.insns,
+			   sizeof(comedi_insn) * insnlist.n_insns)) {
+		DPRINTK("copy_from_user failed\n");
+		ret = -EFAULT;
+		goto error;
+	}
+
+	for (i = 0; i < insnlist.n_insns; i++) {
+		if (insns[i].n > MAX_SAMPLES) {
+			DPRINTK("number of samples too large\n");
+			ret = -EINVAL;
+			goto error;
+		}
+		if (insns[i].insn & INSN_MASK_WRITE) {
+			if (copy_from_user(data, insns[i].data,
+					   insns[i].n * sizeof(lsampl_t))) {
+				DPRINTK("copy_from_user failed\n");
+				ret = -EFAULT;
+				goto error;
+			}
+		}
+		ret = parse_insn(dev, insns + i, data, file);
+		if (ret < 0)
+			goto error;
+		if (insns[i].insn & INSN_MASK_READ) {
+			if (copy_to_user(insns[i].data, data,
+					 insns[i].n * sizeof(lsampl_t))) {
+				DPRINTK("copy_to_user failed\n");
+				ret = -EFAULT;
+				goto error;
+			}
+		}
+		if (need_resched())
+			schedule();
+	}
+
+error:
+	kfree(insns);
+	kfree(data);
+
+	if (ret < 0)
+		return ret;
+	return i;
+}
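+
+/*
+ * Illustrative userspace sketch for COMEDI_INSNLIST -- two analogue
+ * reads in one call (a sketch; CR_PACK(chan, range, aref) and
+ * AREF_GROUND are the usual userspace helpers, assumed available):
+ *
+ *	lsampl_t d0, d1;
+ *	comedi_insn insn[2];
+ *	comedi_insnlist il;
+ *	memset(insn, 0, sizeof(insn));
+ *	insn[0].insn = INSN_READ;
+ *	insn[0].n = 1;
+ *	insn[0].data = &d0;
+ *	insn[0].subdev = 0;
+ *	insn[0].chanspec = CR_PACK(0, 0, AREF_GROUND);
+ *	insn[1] = insn[0];
+ *	insn[1].data = &d1;
+ *	insn[1].chanspec = CR_PACK(1, 0, AREF_GROUND);
+ *	il.n_insns = 2;
+ *	il.insns = insn;
+ *	ioctl(fd, COMEDI_INSNLIST, &il);
+ */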
+
+static int check_insn_config_length(comedi_insn *insn, lsampl_t *data)
+{
+	if (insn->n < 1)
+		return -EINVAL;
+
+	switch (data[0]) {
+	case INSN_CONFIG_DIO_OUTPUT:
+	case INSN_CONFIG_DIO_INPUT:
+	case INSN_CONFIG_DISARM:
+	case INSN_CONFIG_RESET:
+		if (insn->n == 1)
+			return 0;
+		break;
+	case INSN_CONFIG_ARM:
+	case INSN_CONFIG_DIO_QUERY:
+	case INSN_CONFIG_BLOCK_SIZE:
+	case INSN_CONFIG_FILTER:
+	case INSN_CONFIG_SERIAL_CLOCK:
+	case INSN_CONFIG_BIDIRECTIONAL_DATA:
+	case INSN_CONFIG_ALT_SOURCE:
+	case INSN_CONFIG_SET_COUNTER_MODE:
+	case INSN_CONFIG_8254_READ_STATUS:
+	case INSN_CONFIG_SET_ROUTING:
+	case INSN_CONFIG_GET_ROUTING:
+	case INSN_CONFIG_GET_PWM_STATUS:
+	case INSN_CONFIG_PWM_SET_PERIOD:
+	case INSN_CONFIG_PWM_GET_PERIOD:
+		if (insn->n == 2)
+			return 0;
+		break;
+	case INSN_CONFIG_SET_GATE_SRC:
+	case INSN_CONFIG_GET_GATE_SRC:
+	case INSN_CONFIG_SET_CLOCK_SRC:
+	case INSN_CONFIG_GET_CLOCK_SRC:
+	case INSN_CONFIG_SET_OTHER_SRC:
+	case INSN_CONFIG_GET_COUNTER_STATUS:
+	case INSN_CONFIG_PWM_SET_H_BRIDGE:
+	case INSN_CONFIG_PWM_GET_H_BRIDGE:
+	case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
+		if (insn->n == 3)
+			return 0;
+		break;
+	case INSN_CONFIG_PWM_OUTPUT:
+	case INSN_CONFIG_ANALOG_TRIG:
+		if (insn->n == 5)
+			return 0;
+		break;
+	/* by default we allow the insn since we don't have checks for
+	 * all possible cases yet */
+	default:
+		rt_printk("comedi: no check for data length of config insn id "
+			  "%i is implemented.\n"
+			  " Add a check to %s in %s.\n"
+			  " Assuming n=%i is correct.\n", data[0], __func__,
+			  __FILE__, insn->n);
+		return 0;
+		break;
+	}
+	return -EINVAL;
+}
+
+static int parse_insn(comedi_device *dev, comedi_insn *insn, lsampl_t *data,
+		      void *file)
+{
+	comedi_subdevice *s;
+	int ret = 0;
+	int i;
+
+	if (insn->insn & INSN_MASK_SPECIAL) {
+		/* a non-subdevice instruction */
+
+		switch (insn->insn) {
+		case INSN_GTOD:
+			{
+				struct timeval tv;
+
+				if (insn->n != 2) {
+					ret = -EINVAL;
+					break;
+				}
+
+				do_gettimeofday(&tv);
+				data[0] = tv.tv_sec;
+				data[1] = tv.tv_usec;
+				ret = 2;
+
+				break;
+			}
+		case INSN_WAIT:
+			if (insn->n != 1 || data[0] >= 100000) {
+				ret = -EINVAL;
+				break;
+			}
+			udelay(data[0] / 1000);
+			ret = 1;
+			break;
+		case INSN_INTTRIG:
+			if (insn->n != 1) {
+				ret = -EINVAL;
+				break;
+			}
+			if (insn->subdev >= dev->n_subdevices) {
+				DPRINTK("%d not usable subdevice\n",
+					insn->subdev);
+				ret = -EINVAL;
+				break;
+			}
+			s = dev->subdevices + insn->subdev;
+			if (!s->async) {
+				DPRINTK("no async\n");
+				ret = -EINVAL;
+				break;
+			}
+			if (!s->async->inttrig) {
+				DPRINTK("no inttrig\n");
+				ret = -EAGAIN;
+				break;
+			}
+			ret = s->async->inttrig(dev, s, insn->data[0]);
+			if (ret >= 0)
+				ret = 1;
+			break;
+		default:
+			DPRINTK("invalid insn\n");
+			ret = -EINVAL;
+			break;
+		}
+	} else {
+		/* a subdevice instruction */
+		lsampl_t maxdata;
+
+		if (insn->subdev >= dev->n_subdevices) {
+			DPRINTK("subdevice %d out of range\n", insn->subdev);
+			ret = -EINVAL;
+			goto out;
+		}
+		s = dev->subdevices + insn->subdev;
+
+		if (s->type == COMEDI_SUBD_UNUSED) {
+			DPRINTK("%d not usable subdevice\n", insn->subdev);
+			ret = -EIO;
+			goto out;
+		}
+
+		/* are we locked? (ioctl lock) */
+		if (s->lock && s->lock != file) {
+			DPRINTK("device locked\n");
+			ret = -EACCES;
+			goto out;
+		}
+
+		ret = check_chanlist(s, 1, &insn->chanspec);
+		if (ret < 0) {
+			ret = -EINVAL;
+			DPRINTK("bad chanspec\n");
+			goto out;
+		}
+
+		if (s->busy) {
+			ret = -EBUSY;
+			goto out;
+		}
+		/* This looks arbitrary.  It is. */
+		s->busy = &parse_insn;
+		switch (insn->insn) {
+		case INSN_READ:
+			ret = s->insn_read(dev, s, insn, data);
+			break;
+		case INSN_WRITE:
+			maxdata = s->maxdata_list
+			    ? s->maxdata_list[CR_CHAN(insn->chanspec)]
+			    : s->maxdata;
+			for (i = 0; i < insn->n; ++i) {
+				if (data[i] > maxdata) {
+					ret = -EINVAL;
+					DPRINTK("bad data value(s)\n");
+					break;
+				}
+			}
+			if (ret == 0)
+				ret = s->insn_write(dev, s, insn, data);
+			break;
+		case INSN_BITS:
+			if (insn->n != 2) {
+				ret = -EINVAL;
+				break;
+			}
+			ret = s->insn_bits(dev, s, insn, data);
+			break;
+		case INSN_CONFIG:
+			ret = check_insn_config_length(insn, data);
+			if (ret)
+				break;
+			ret = s->insn_config(dev, s, insn, data);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+
+		s->busy = NULL;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * 	COMEDI_INSN
+ * 	synchronous instructions
+ *
+ * 	arg:
+ * 		pointer to insn
+ *
+ * 	reads:
+ * 		comedi_insn struct at arg
+ * 		data (for writes)
+ *
+ * 	writes:
+ * 		data (for reads)
+ */
+static int do_insn_ioctl(comedi_device *dev, void *arg, void *file)
+{
+	comedi_insn insn;
+	lsampl_t *data = NULL;
+	int ret = 0;
+
+	data = kmalloc(sizeof(lsampl_t) * MAX_SAMPLES, GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	if (copy_from_user(&insn, arg, sizeof(comedi_insn))) {
+		ret = -EFAULT;
+		goto error;
+	}
+
+	/* This is where the behavior of insn and insnlist deviate. */
+	if (insn.n > MAX_SAMPLES)
+		insn.n = MAX_SAMPLES;
+	if (insn.insn & INSN_MASK_WRITE) {
+		if (copy_from_user(data, insn.data, insn.n * sizeof(lsampl_t))) {
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+	ret = parse_insn(dev, &insn, data, file);
+	if (ret < 0)
+		goto error;
+	if (insn.insn & INSN_MASK_READ) {
+		if (copy_to_user(insn.data, data, insn.n * sizeof(lsampl_t))) {
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+	ret = insn.n;
+
+error:
+	kfree(data);
+
+	return ret;
+}
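+
+/*
+ * Illustrative userspace sketch for COMEDI_INSN -- the INSN_GTOD special
+ * instruction handled in parse_insn() above (a sketch, assuming an open
+ * fd; t[0] receives seconds, t[1] microseconds):
+ *
+ *	lsampl_t t[2];
+ *	comedi_insn insn;
+ *	memset(&insn, 0, sizeof(insn));
+ *	insn.insn = INSN_GTOD;
+ *	insn.n = 2;
+ *	insn.data = t;
+ *	ioctl(fd, COMEDI_INSN, &insn);
+ */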
+
+/*
+	COMEDI_CMD
+	command ioctl
+
+	arg:
+		pointer to cmd structure
+
+	reads:
+		cmd structure at arg
+		channel/range list
+
+	writes:
+		modified cmd structure at arg
+
+*/
+static int do_cmd_ioctl(comedi_device *dev, void *arg, void *file)
+{
+	comedi_cmd user_cmd;
+	comedi_subdevice *s;
+	comedi_async *async;
+	int ret = 0;
+	unsigned int *chanlist_saver = NULL;
+
+	if (copy_from_user(&user_cmd, arg, sizeof(comedi_cmd))) {
+		DPRINTK("bad cmd address\n");
+		return -EFAULT;
+	}
+	/* save user's chanlist pointer so it can be restored later */
+	chanlist_saver = user_cmd.chanlist;
+
+	if (user_cmd.subdev >= dev->n_subdevices) {
+		DPRINTK("%d no such subdevice\n", user_cmd.subdev);
+		return -ENODEV;
+	}
+
+	s = dev->subdevices + user_cmd.subdev;
+	async = s->async;
+
+	if (s->type == COMEDI_SUBD_UNUSED) {
+		DPRINTK("%d not valid subdevice\n", user_cmd.subdev);
+		return -EIO;
+	}
+
+	if (!s->do_cmd || !s->do_cmdtest || !s->async) {
+		DPRINTK("subdevice %i does not support commands\n",
+			user_cmd.subdev);
+		return -EIO;
+	}
+
+	/* are we locked? (ioctl lock) */
+	if (s->lock && s->lock != file) {
+		DPRINTK("subdevice locked\n");
+		return -EACCES;
+	}
+
+	/* are we busy? */
+	if (s->busy) {
+		DPRINTK("subdevice busy\n");
+		return -EBUSY;
+	}
+	s->busy = file;
+
+	/* make sure channel/gain list isn't too long */
+	if (user_cmd.chanlist_len > s->len_chanlist) {
+		DPRINTK("channel/gain list too long %u > %d\n",
+			user_cmd.chanlist_len, s->len_chanlist);
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	/* make sure channel/gain list isn't too short */
+	if (user_cmd.chanlist_len < 1) {
+		DPRINTK("channel/gain list too short %u < 1\n",
+			user_cmd.chanlist_len);
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	kfree(async->cmd.chanlist);
+	async->cmd = user_cmd;
+	async->cmd.data = NULL;
+	/* load channel/gain list */
+	async->cmd.chanlist =
+	    kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
+	if (!async->cmd.chanlist) {
+		DPRINTK("allocation failed\n");
+		ret = -ENOMEM;
+		goto cleanup;
+	}
+
+	if (copy_from_user(async->cmd.chanlist, user_cmd.chanlist,
+			   async->cmd.chanlist_len * sizeof(int))) {
+		DPRINTK("fault reading chanlist\n");
+		ret = -EFAULT;
+		goto cleanup;
+	}
+
+	/* make sure each element in channel/gain list is valid */
+	ret = check_chanlist(s, async->cmd.chanlist_len, async->cmd.chanlist);
+	if (ret < 0) {
+		DPRINTK("bad chanlist\n");
+		goto cleanup;
+	}
+
+	ret = s->do_cmdtest(dev, s, &async->cmd);
+
+	if ((async->cmd.flags & TRIG_BOGUS) || ret) {
+		DPRINTK("test returned %d\n", ret);
+		user_cmd = async->cmd;
+		/* restore chanlist pointer before copying back */
+		user_cmd.chanlist = chanlist_saver;
+		user_cmd.data = NULL;
+		if (copy_to_user(arg, &user_cmd, sizeof(comedi_cmd))) {
+			DPRINTK("fault writing cmd\n");
+			ret = -EFAULT;
+			goto cleanup;
+		}
+		ret = -EAGAIN;
+		goto cleanup;
+	}
+
+	if (!async->prealloc_bufsz) {
+		ret = -ENOMEM;
+		DPRINTK("no buffer (?)\n");
+		goto cleanup;
+	}
+
+	comedi_reset_async_buf(async);
+
+	async->cb_mask =
+	    COMEDI_CB_EOA | COMEDI_CB_BLOCK | COMEDI_CB_ERROR |
+	    COMEDI_CB_OVERFLOW;
+	if (async->cmd.flags & TRIG_WAKE_EOS)
+		async->cb_mask |= COMEDI_CB_EOS;
+
+	comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+
+#ifdef CONFIG_COMEDI_RT
+	if (async->cmd.flags & TRIG_RT) {
+		if (comedi_switch_to_rt(dev) == 0)
+			comedi_set_subdevice_runflags(s, SRF_RT, SRF_RT);
+	}
+#endif
+
+	ret = s->do_cmd(dev, s);
+	if (ret == 0)
+		return 0;
+
+cleanup:
+	do_become_nonbusy(dev, s);
+
+	return ret;
+}
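+
+/*
+ * Illustrative userspace sketch for the usual command dance -- test the
+ * command with COMEDI_CMDTEST (handled below) until it passes, then
+ * start it with COMEDI_CMD (a sketch; filling in a valid comedi_cmd and
+ * chanlist is driver-specific and omitted here):
+ *
+ *	comedi_cmd cmd;
+ *	... fill in cmd and cmd.chanlist for the target subdevice ...
+ *	while (ioctl(fd, COMEDI_CMDTEST, &cmd) > 0)
+ *		;	-- positive return: cmd was adjusted, test again
+ *	if (ioctl(fd, COMEDI_CMD, &cmd) < 0)
+ *		perror("COMEDI_CMD");	-- EAGAIN if the test still fails
+ */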
+
+/*
+	COMEDI_CMDTEST
+	command testing ioctl
+
+	arg:
+		pointer to cmd structure
+
+	reads:
+		cmd structure at arg
+		channel/range list
+
+	writes:
+		modified cmd structure at arg
+
+*/
+static int do_cmdtest_ioctl(comedi_device *dev, void *arg, void *file)
+{
+	comedi_cmd user_cmd;
+	comedi_subdevice *s;
+	int ret = 0;
+	unsigned int *chanlist = NULL;
+	unsigned int *chanlist_saver = NULL;
+
+	if (copy_from_user(&user_cmd, arg, sizeof(comedi_cmd))) {
+		DPRINTK("bad cmd address\n");
+		return -EFAULT;
+	}
+	/* save user's chanlist pointer so it can be restored later */
+	chanlist_saver = user_cmd.chanlist;
+
+	if (user_cmd.subdev >= dev->n_subdevices) {
+		DPRINTK("%d no such subdevice\n", user_cmd.subdev);
+		return -ENODEV;
+	}
+
+	s = dev->subdevices + user_cmd.subdev;
+	if (s->type == COMEDI_SUBD_UNUSED) {
+		DPRINTK("%d not valid subdevice\n", user_cmd.subdev);
+		return -EIO;
+	}
+
+	if (!s->do_cmd || !s->do_cmdtest) {
+		DPRINTK("subdevice %i does not support commands\n",
+			user_cmd.subdev);
+		return -EIO;
+	}
+
+	/* make sure channel/gain list isn't too long */
+	if (user_cmd.chanlist_len > s->len_chanlist) {
+		DPRINTK("channel/gain list too long %d > %d\n",
+			user_cmd.chanlist_len, s->len_chanlist);
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	/* load channel/gain list */
+	if (user_cmd.chanlist) {
+		chanlist =
+		    kmalloc(user_cmd.chanlist_len * sizeof(int), GFP_KERNEL);
+		if (!chanlist) {
+			DPRINTK("allocation failed\n");
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (copy_from_user(chanlist, user_cmd.chanlist,
+				   user_cmd.chanlist_len * sizeof(int))) {
+			DPRINTK("fault reading chanlist\n");
+			ret = -EFAULT;
+			goto cleanup;
+		}
+
+		/* make sure each element in channel/gain list is valid */
+		ret = check_chanlist(s, user_cmd.chanlist_len, chanlist);
+		if (ret < 0) {
+			DPRINTK("bad chanlist\n");
+			goto cleanup;
+		}
+
+		user_cmd.chanlist = chanlist;
+	}
+
+	ret = s->do_cmdtest(dev, s, &user_cmd);
+
+	/* restore chanlist pointer before copying back */
+	user_cmd.chanlist = chanlist_saver;
+
+	if (copy_to_user(arg, &user_cmd, sizeof(comedi_cmd))) {
+		DPRINTK("bad cmd address\n");
+		ret = -EFAULT;
+		goto cleanup;
+	}
+cleanup:
+	kfree(chanlist);
+
+	return ret;
+}
+
+/*
+	COMEDI_LOCK
+	lock subdevice
+
+	arg:
+		subdevice number
+
+	reads:
+		none
+
+	writes:
+		none
+
+*/
+
+static int do_lock_ioctl(comedi_device *dev, unsigned int arg, void *file)
+{
+	int ret = 0;
+	unsigned long flags;
+	comedi_subdevice *s;
+
+	if (arg >= dev->n_subdevices)
+		return -EINVAL;
+	s = dev->subdevices + arg;
+
+	comedi_spin_lock_irqsave(&s->spin_lock, flags);
+	if (s->busy || s->lock)
+		ret = -EBUSY;
+	else
+		s->lock = file;
+	comedi_spin_unlock_irqrestore(&s->spin_lock, flags);
+
+	if (ret < 0)
+		return ret;
+
+#if 0
+	if (s->lock_f)
+		ret = s->lock_f(dev, s);
+#endif
+
+	return ret;
+}
+
+/*
+	COMEDI_UNLOCK
+	unlock subdevice
+
+	arg:
+		subdevice number
+
+	reads:
+		none
+
+	writes:
+		none
+
+	This function isn't protected by the semaphore, since
+	we already own the lock.
+*/
+static int do_unlock_ioctl(comedi_device *dev, unsigned int arg, void *file)
+{
+	comedi_subdevice *s;
+
+	if (arg >= dev->n_subdevices)
+		return -EINVAL;
+	s = dev->subdevices + arg;
+
+	if (s->busy)
+		return -EBUSY;
+
+	if (s->lock && s->lock != file)
+		return -EACCES;
+
+	if (s->lock == file) {
+#if 0
+		if (s->unlock)
+			s->unlock(dev, s);
+#endif
+
+		s->lock = NULL;
+	}
+
+	return 0;
+}
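+
+/*
+ * Illustrative userspace sketch for COMEDI_LOCK/COMEDI_UNLOCK -- note
+ * that the argument is the subdevice number itself, not a pointer:
+ *
+ *	if (ioctl(fd, COMEDI_LOCK, 0) == 0) {
+ *		... exclusive use of subdevice 0 ...
+ *		ioctl(fd, COMEDI_UNLOCK, 0);
+ *	}
+ */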
+
+/*
+	COMEDI_CANCEL
+	cancel acquisition ioctl
+
+	arg:
+		subdevice number
+
+	reads:
+		nothing
+
+	writes:
+		nothing
+
+*/
+static int do_cancel_ioctl(comedi_device *dev, unsigned int arg, void *file)
+{
+	comedi_subdevice *s;
+
+	if (arg >= dev->n_subdevices)
+		return -EINVAL;
+	s = dev->subdevices + arg;
+	if (s->async == NULL)
+		return -EINVAL;
+
+	if (s->lock && s->lock != file)
+		return -EACCES;
+
+	if (!s->busy)
+		return 0;
+
+	if (s->busy != file)
+		return -EBUSY;
+
+	return do_cancel(dev, s);
+}
+
+/*
+	COMEDI_POLL ioctl
+	instructs driver to synchronize buffers
+
+	arg:
+		subdevice number
+
+	reads:
+		nothing
+
+	writes:
+		nothing
+
+*/
+static int do_poll_ioctl(comedi_device *dev, unsigned int arg, void *file)
+{
+	comedi_subdevice *s;
+
+	if (arg >= dev->n_subdevices)
+		return -EINVAL;
+	s = dev->subdevices + arg;
+
+	if (s->lock && s->lock != file)
+		return -EACCES;
+
+	if (!s->busy)
+		return 0;
+
+	if (s->busy != file)
+		return -EBUSY;
+
+	if (s->poll)
+		return s->poll(dev, s);
+
+	return -EINVAL;
+}
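+
+/*
+ * Illustrative userspace sketch for COMEDI_CANCEL and COMEDI_POLL --
+ * like LOCK/UNLOCK these take the bare subdevice number as argument:
+ *
+ *	ioctl(fd, COMEDI_POLL, 0);	-- synchronize buffers for subd 0
+ *	ioctl(fd, COMEDI_CANCEL, 0);	-- stop the running acquisition
+ */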
+
+static int do_cancel(comedi_device *dev, comedi_subdevice *s)
+{
+	int ret = 0;
+
+	if ((comedi_get_subdevice_runflags(s) & SRF_RUNNING) && s->cancel)
+		ret = s->cancel(dev, s);
+
+	do_become_nonbusy(dev, s);
+
+	return ret;
+}
+
+void comedi_unmap(struct vm_area_struct *area)
+{
+	comedi_async *async;
+	comedi_device *dev;
+
+	async = area->vm_private_data;
+	dev = async->subdevice->device;
+
+	mutex_lock(&dev->mutex);
+	async->mmap_count--;
+	mutex_unlock(&dev->mutex);
+}
+
+static struct vm_operations_struct comedi_vm_ops = {
+	.close =	comedi_unmap,
+};
+
+static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+	comedi_async *async = NULL;
+	unsigned long start = vma->vm_start;
+	unsigned long size;
+	int n_pages;
+	int i;
+	int retval;
+	comedi_subdevice *s;
+
+	mutex_lock(&dev->mutex);
+	if (!dev->attached) {
+		DPRINTK("no driver configured on comedi%i\n", dev->minor);
+		retval = -ENODEV;
+		goto done;
+	}
+	if (vma->vm_flags & VM_WRITE)
+		s = comedi_get_write_subdevice(dev_file_info);
+	else
+		s = comedi_get_read_subdevice(dev_file_info);
+
+	if (s == NULL) {
+		retval = -EINVAL;
+		goto done;
+	}
+	async = s->async;
+	if (async == NULL) {
+		retval = -EINVAL;
+		goto done;
+	}
+
+	if (vma->vm_pgoff != 0) {
+		DPRINTK("comedi: mmap() offset must be 0.\n");
+		retval = -EINVAL;
+		goto done;
+	}
+
+	size = vma->vm_end - vma->vm_start;
+	if (size > async->prealloc_bufsz) {
+		retval = -EFAULT;
+		goto done;
+	}
+	if (size & (~PAGE_MASK)) {
+		retval = -EFAULT;
+		goto done;
+	}
+
+	n_pages = size >> PAGE_SHIFT;
+	for (i = 0; i < n_pages; ++i) {
+		if (remap_pfn_range(vma, start,
+				    page_to_pfn(virt_to_page(
+					async->buf_page_list[i].virt_addr)),
+				    PAGE_SIZE, PAGE_SHARED)) {
+			retval = -EAGAIN;
+			goto done;
+		}
+		start += PAGE_SIZE;
+	}
+
+	vma->vm_ops = &comedi_vm_ops;
+	vma->vm_private_data = async;
+
+	async->mmap_count++;
+
+	retval = 0;
+done:
+	mutex_unlock(&dev->mutex);
+	return retval;
+}
+
+static unsigned int comedi_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+	comedi_subdevice *read_subdev;
+	comedi_subdevice *write_subdev;
+
+	mutex_lock(&dev->mutex);
+	if (!dev->attached) {
+		DPRINTK("no driver configured on comedi%i\n", dev->minor);
+		mutex_unlock(&dev->mutex);
+		return 0;
+	}
+
+	read_subdev = comedi_get_read_subdevice(dev_file_info);
+	if (read_subdev) {
+		poll_wait(file, &read_subdev->async->wait_head, wait);
+		if (!read_subdev->busy
+		    || comedi_buf_read_n_available(read_subdev->async) > 0
+		    || !(comedi_get_subdevice_runflags(read_subdev) &
+			 SRF_RUNNING)) {
+			mask |= POLLIN | POLLRDNORM;
+		}
+	}
+	write_subdev = comedi_get_write_subdevice(dev_file_info);
+	if (write_subdev) {
+		poll_wait(file, &write_subdev->async->wait_head, wait);
+		comedi_buf_write_alloc(write_subdev->async,
+				       write_subdev->async->prealloc_bufsz);
+		if (!write_subdev->busy
+		    || !(comedi_get_subdevice_runflags(write_subdev) &
+			 SRF_RUNNING)
+		    || comedi_buf_write_n_allocated(write_subdev->async) >=
+		    bytes_per_sample(write_subdev->async->subdevice)) {
+			mask |= POLLOUT | POLLWRNORM;
+		}
+	}
+
+	mutex_unlock(&dev->mutex);
+	return mask;
+}
+
+static ssize_t comedi_write(struct file *file, const char *buf, size_t nbytes,
+			    loff_t *offset)
+{
+	comedi_subdevice *s;
+	comedi_async *async;
+	int n, m, count = 0, retval = 0;
+	DECLARE_WAITQUEUE(wait, current);
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+
+	if (!dev->attached) {
+		DPRINTK("no driver configured on comedi%i\n", dev->minor);
+		retval = -ENODEV;
+		goto done;
+	}
+
+	s = comedi_get_write_subdevice(dev_file_info);
+	if (s == NULL) {
+		retval = -EIO;
+		goto done;
+	}
+	async = s->async;
+
+	if (!nbytes) {
+		retval = 0;
+		goto done;
+	}
+	if (!s->busy) {
+		retval = 0;
+		goto done;
+	}
+	if (s->busy != file) {
+		retval = -EACCES;
+		goto done;
+	}
+	add_wait_queue(&async->wait_head, &wait);
+	while (nbytes > 0 && !retval) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		n = nbytes;
+
+		m = n;
+		if (async->buf_write_ptr + m > async->prealloc_bufsz)
+			m = async->prealloc_bufsz - async->buf_write_ptr;
+		comedi_buf_write_alloc(async, async->prealloc_bufsz);
+		if (m > comedi_buf_write_n_allocated(async))
+			m = comedi_buf_write_n_allocated(async);
+		if (m < n)
+			n = m;
+
+		if (n == 0) {
+			if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+				if (comedi_get_subdevice_runflags(s) &
+				    SRF_ERROR) {
+					retval = -EPIPE;
+				} else {
+					retval = 0;
+				}
+				do_become_nonbusy(dev, s);
+				break;
+			}
+			if (file->f_flags & O_NONBLOCK) {
+				retval = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				retval = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+			if (!s->busy)
+				break;
+			if (s->busy != file) {
+				retval = -EACCES;
+				break;
+			}
+			continue;
+		}
+
+		m = copy_from_user(async->prealloc_buf + async->buf_write_ptr,
+				   buf, n);
+		if (m) {
+			n -= m;
+			retval = -EFAULT;
+		}
+		comedi_buf_write_free(async, n);
+
+		count += n;
+		nbytes -= n;
+
+		buf += n;
+		break;		/* makes device work like a pipe */
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&async->wait_head, &wait);
+
+done:
+	return count ? count : retval;
+}
+
+static ssize_t comedi_read(struct file *file, char *buf, size_t nbytes,
+			   loff_t *offset)
+{
+	comedi_subdevice *s;
+	comedi_async *async;
+	int n, m, count = 0, retval = 0;
+	DECLARE_WAITQUEUE(wait, current);
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+
+	if (!dev->attached) {
+		DPRINTK("no driver configured on comedi%i\n", dev->minor);
+		retval = -ENODEV;
+		goto done;
+	}
+
+	s = comedi_get_read_subdevice(dev_file_info);
+	if (s == NULL) {
+		retval = -EIO;
+		goto done;
+	}
+	async = s->async;
+	if (!nbytes) {
+		retval = 0;
+		goto done;
+	}
+	if (!s->busy) {
+		retval = 0;
+		goto done;
+	}
+	if (s->busy != file) {
+		retval = -EACCES;
+		goto done;
+	}
+
+	add_wait_queue(&async->wait_head, &wait);
+	while (nbytes > 0 && !retval) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		n = nbytes;
+
+		m = comedi_buf_read_n_available(async);
+		/* printk("%d available\n",m); */
+		if (async->buf_read_ptr + m > async->prealloc_bufsz)
+			m = async->prealloc_bufsz - async->buf_read_ptr;
+		/* printk("%d contiguous\n",m); */
+		if (m < n)
+			n = m;
+
+		if (n == 0) {
+			if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+				do_become_nonbusy(dev, s);
+				if (comedi_get_subdevice_runflags(s) &
+				    SRF_ERROR) {
+					retval = -EPIPE;
+				} else {
+					retval = 0;
+				}
+				break;
+			}
+			if (file->f_flags & O_NONBLOCK) {
+				retval = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				retval = -ERESTARTSYS;
+				break;
+			}
+			schedule();
+			if (!s->busy) {
+				retval = 0;
+				break;
+			}
+			if (s->busy != file) {
+				retval = -EACCES;
+				break;
+			}
+			continue;
+		}
+		m = copy_to_user(buf, async->prealloc_buf +
+				 async->buf_read_ptr, n);
+		if (m) {
+			n -= m;
+			retval = -EFAULT;
+		}
+
+		comedi_buf_read_alloc(async, n);
+		comedi_buf_read_free(async, n);
+
+		count += n;
+		nbytes -= n;
+
+		buf += n;
+		break;		/* makes device work like a pipe */
+	}
+	if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+	    async->buf_read_count - async->buf_write_count == 0) {
+		do_become_nonbusy(dev, s);
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&async->wait_head, &wait);
+
+done:
+	return count ? count : retval;
+}
+
+/*
+   This function restores a subdevice to an idle state.
+ */
+void do_become_nonbusy(comedi_device *dev, comedi_subdevice *s)
+{
+	comedi_async *async = s->async;
+
+	comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
+#ifdef CONFIG_COMEDI_RT
+	if (comedi_get_subdevice_runflags(s) & SRF_RT) {
+		comedi_switch_to_non_rt(dev);
+		comedi_set_subdevice_runflags(s, SRF_RT, 0);
+	}
+#endif
+	if (async) {
+		comedi_reset_async_buf(async);
+		async->inttrig = NULL;
+	} else {
+		printk(KERN_ERR
+		       "BUG: (?) do_become_nonbusy called with async=0\n");
+	}
+
+	s->busy = NULL;
+}
+
+static int comedi_open(struct inode *inode, struct file *file)
+{
+	char mod[32];
+	const unsigned minor = iminor(inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev;
+
+	if (dev_file_info == NULL || dev_file_info->device == NULL) {
+		DPRINTK("invalid minor number\n");
+		return -ENODEV;
+	}
+	dev = dev_file_info->device;
+
+	/* This is slightly hacky, but we want module autoloading
+	 * to work for root.
+	 * case: user opens device, attached -> ok
+	 * case: user opens device, unattached, in_request_module=0 -> autoload
+	 * case: user opens device, unattached, in_request_module=1 -> fail
+	 * case: root opens device, attached -> ok
+	 * case: root opens device, unattached, in_request_module=1 -> ok
+	 *   (typically called from modprobe)
+	 * case: root opens device, unattached, in_request_module=0 -> autoload
+	 *
+	 * The last could be changed to "-> ok", which would deny root
+	 * autoloading.
+	 */
+	mutex_lock(&dev->mutex);
+	if (dev->attached)
+		goto ok;
+	if (!capable(CAP_SYS_MODULE) && dev->in_request_module) {
+		DPRINTK("in request module\n");
+		mutex_unlock(&dev->mutex);
+		return -ENODEV;
+	}
+	if (capable(CAP_SYS_MODULE) && dev->in_request_module)
+		goto ok;
+
+	dev->in_request_module = 1;
+
+	sprintf(mod, "char-major-%i-%i", COMEDI_MAJOR, dev->minor);
+#ifdef CONFIG_KMOD
+	mutex_unlock(&dev->mutex);
+	request_module(mod);
+	mutex_lock(&dev->mutex);
+#endif
+
+	dev->in_request_module = 0;
+
+	if (!dev->attached && !capable(CAP_SYS_MODULE)) {
+		DPRINTK("not attached and not CAP_SYS_MODULE\n");
+		mutex_unlock(&dev->mutex);
+		return -ENODEV;
+	}
+ok:
+	__module_get(THIS_MODULE);
+
+	if (dev->attached) {
+		if (!try_module_get(dev->driver->module)) {
+			module_put(THIS_MODULE);
+			mutex_unlock(&dev->mutex);
+			return -ENOSYS;
+		}
+	}
+
+	if (dev->attached && dev->use_count == 0 && dev->open)
+		dev->open(dev);
+
+	dev->use_count++;
+
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
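+
+/*
+ * The request_module() call above resolves through the standard modprobe
+ * alias machinery.  An illustrative configuration line (assuming the
+ * system uses /etc/modprobe.conf and that COMEDI_MAJOR is 98):
+ *
+ *	alias char-major-98-0 comedi
+ */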
+
+static int comedi_close(struct inode *inode, struct file *file)
+{
+	const unsigned minor = iminor(inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+	comedi_device *dev = dev_file_info->device;
+	comedi_subdevice *s = NULL;
+	int i;
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->subdevices) {
+		for (i = 0; i < dev->n_subdevices; i++) {
+			s = dev->subdevices + i;
+
+			if (s->busy == file)
+				do_cancel(dev, s);
+			if (s->lock == file)
+				s->lock = NULL;
+		}
+	}
+	if (dev->attached && dev->use_count == 1 && dev->close)
+		dev->close(dev);
+
+	module_put(THIS_MODULE);
+	if (dev->attached)
+		module_put(dev->driver->module);
+
+	dev->use_count--;
+
+	mutex_unlock(&dev->mutex);
+
+	if (file->f_flags & FASYNC)
+		comedi_fasync(-1, file, 0);
+
+	return 0;
+}
+
+static int comedi_fasync(int fd, struct file *file, int on)
+{
+	const unsigned minor = iminor(file->f_dentry->d_inode);
+	struct comedi_device_file_info *dev_file_info =
+	    comedi_get_device_file_info(minor);
+
+	comedi_device *dev = dev_file_info->device;
+
+	return fasync_helper(fd, file, on, &dev->async_queue);
+}
+
+const struct file_operations comedi_fops = {
+      .owner =		THIS_MODULE,
+#ifdef HAVE_UNLOCKED_IOCTL
+      .unlocked_ioctl =	comedi_unlocked_ioctl,
+#else
+      .ioctl =		comedi_ioctl,
+#endif
+#ifdef HAVE_COMPAT_IOCTL
+      .compat_ioctl =	comedi_compat_ioctl,
+#endif
+      .open =		comedi_open,
+      .release =	comedi_close,
+      .read =		comedi_read,
+      .write =		comedi_write,
+      .mmap =		comedi_mmap,
+      .poll =		comedi_poll,
+      .fasync =		comedi_fasync,
+};
+
+struct class *comedi_class;
+static struct cdev comedi_cdev;
+
+static void comedi_cleanup_legacy_minors(void)
+{
+	unsigned i;
+
+	for (i = 0; i < COMEDI_NUM_LEGACY_MINORS; i++)
+		comedi_free_board_minor(i);
+}
+
+static int __init comedi_init(void)
+{
+	int i;
+	int retval;
+
+	printk(KERN_INFO "comedi: version " COMEDI_RELEASE
+	       " - http://www.comedi.org\n");
+
+	memset(comedi_file_info_table, 0,
+	       sizeof(struct comedi_device_file_info *) * COMEDI_NUM_MINORS);
+
+	retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+					COMEDI_NUM_MINORS, "comedi");
+	if (retval)
+		return -EIO;
+	cdev_init(&comedi_cdev, &comedi_fops);
+	comedi_cdev.owner = THIS_MODULE;
+	kobject_set_name(&comedi_cdev.kobj, "comedi");
+	if (cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS)) {
+		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+					 COMEDI_NUM_MINORS);
+		return -EIO;
+	}
+	comedi_class = class_create(THIS_MODULE, "comedi");
+	if (IS_ERR(comedi_class)) {
+		printk("comedi: failed to create class");
+		cdev_del(&comedi_cdev);
+		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+					 COMEDI_NUM_MINORS);
+		return PTR_ERR(comedi_class);
+	}
+
+	/* XXX requires /proc interface */
+	comedi_proc_init();
+
+	/* create device files for legacy/manual use */
+	for (i = 0; i < COMEDI_NUM_LEGACY_MINORS; i++) {
+		int minor;
+		minor = comedi_alloc_board_minor(NULL);
+		if (minor < 0) {
+			comedi_cleanup_legacy_minors();
+			cdev_del(&comedi_cdev);
+			unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+						 COMEDI_NUM_MINORS);
+			return minor;
+		}
+	}
+
+	comedi_rt_init();
+
+	comedi_register_ioctl32();
+
+	return 0;
+}
+
+static void __exit comedi_cleanup(void)
+{
+	int i;
+
+	comedi_cleanup_legacy_minors();
+	for (i = 0; i < COMEDI_NUM_MINORS; ++i)
+		BUG_ON(comedi_file_info_table[i]);
+
+	class_destroy(comedi_class);
+	cdev_del(&comedi_cdev);
+	unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
+
+	comedi_proc_cleanup();
+
+	comedi_rt_cleanup();
+
+	comedi_unregister_ioctl32();
+}
+
+module_init(comedi_init);
+module_exit(comedi_cleanup);
+
+void comedi_error(const comedi_device *dev, const char *s)
+{
+	rt_printk("comedi%d: %s: %s\n", dev->minor, dev->driver->driver_name,
+		  s);
+}
+
+void comedi_event(comedi_device *dev, comedi_subdevice *s)
+{
+	comedi_async *async = s->async;
+	unsigned runflags = 0;
+	unsigned runflags_mask = 0;
+
+	/* DPRINTK("comedi_event 0x%x\n",mask); */
+
+	if ((comedi_get_subdevice_runflags(s) & SRF_RUNNING) == 0)
+		return;
+
+	if (s->async->events &
+	    (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
+		runflags_mask |= SRF_RUNNING;
+	}
+	/* remember if an error event has occurred, so an error
+	 * can be returned the next time the user does a read() */
+	if (s->async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
+		runflags_mask |= SRF_ERROR;
+		runflags |= SRF_ERROR;
+	}
+	if (runflags_mask) {
+		/* sets SRF_ERROR and SRF_RUNNING together atomically */
+		comedi_set_subdevice_runflags(s, runflags_mask, runflags);
+	}
+
+	if (async->cb_mask & s->async->events) {
+		if (comedi_get_subdevice_runflags(s) & SRF_USER) {
+
+			if (dev->rt) {
+#ifdef CONFIG_COMEDI_RT
+				/* pend wake up */
+				comedi_rt_pend_wakeup(&async->wait_head);
+#else
+				printk(KERN_ERR
+				       "BUG: comedi_event() code unreachable\n");
+#endif
+			} else {
+				wake_up_interruptible(&async->wait_head);
+				if (s->subdev_flags & SDF_CMD_READ) {
+					kill_fasync(&dev->async_queue, SIGIO,
+						    POLL_IN);
+				}
+				if (s->subdev_flags & SDF_CMD_WRITE) {
+					kill_fasync(&dev->async_queue, SIGIO,
+						    POLL_OUT);
+				}
+			}
+		} else {
+			if (async->cb_func)
+				async->cb_func(s->async->events, async->cb_arg);
+			/* XXX bug here.  If subdevice A is rt, and
+			 * subdevice B tries to callback to a normal
+			 * linux kernel function, it will be at the
+			 * wrong priority.  Since this isn't very
+			 * common, I'm not going to worry about it. */
+		}
+	}
+	s->async->events = 0;
+}
+
+void comedi_set_subdevice_runflags(comedi_subdevice *s, unsigned mask,
+				   unsigned bits)
+{
+	unsigned long flags;
+
+	comedi_spin_lock_irqsave(&s->spin_lock, flags);
+	s->runflags &= ~mask;
+	s->runflags |= (bits & mask);
+	comedi_spin_unlock_irqrestore(&s->spin_lock, flags);
+}
+
+unsigned comedi_get_subdevice_runflags(comedi_subdevice *s)
+{
+	unsigned long flags;
+	unsigned runflags;
+
+	comedi_spin_lock_irqsave(&s->spin_lock, flags);
+	runflags = s->runflags;
+	comedi_spin_unlock_irqrestore(&s->spin_lock, flags);
+	return runflags;
+}
+
+static int is_device_busy(comedi_device *dev)
+{
+	comedi_subdevice *s;
+	int i;
+
+	if (!dev->attached)
+		return 0;
+
+	for (i = 0; i < dev->n_subdevices; i++) {
+		s = dev->subdevices + i;
+		if (s->busy)
+			return 1;
+		if (s->async && s->async->mmap_count)
+			return 1;
+	}
+
+	return 0;
+}
+
+void comedi_device_init(comedi_device *dev)
+{
+	memset(dev, 0, sizeof(comedi_device));
+	spin_lock_init(&dev->spinlock);
+	mutex_init(&dev->mutex);
+	dev->minor = -1;
+}
+
+void comedi_device_cleanup(comedi_device *dev)
+{
+	if (dev == NULL)
+		return;
+	mutex_lock(&dev->mutex);
+	comedi_device_detach(dev);
+	mutex_unlock(&dev->mutex);
+	mutex_destroy(&dev->mutex);
+}
+
+int comedi_alloc_board_minor(struct device *hardware_device)
+{
+	unsigned long flags;
+	struct comedi_device_file_info *info;
+	device_create_result_type *csdev;
+	unsigned i;
+
+	info = kzalloc(sizeof(struct comedi_device_file_info), GFP_KERNEL);
+	if (info == NULL)
+		return -ENOMEM;
+	info->device = kzalloc(sizeof(comedi_device), GFP_KERNEL);
+	if (info->device == NULL) {
+		kfree(info);
+		return -ENOMEM;
+	}
+	comedi_device_init(info->device);
+	comedi_spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) {
+		if (comedi_file_info_table[i] == NULL) {
+			comedi_file_info_table[i] = info;
+			break;
+		}
+	}
+	comedi_spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
+	if (i == COMEDI_NUM_BOARD_MINORS) {
+		comedi_device_cleanup(info->device);
+		kfree(info->device);
+		kfree(info);
+		rt_printk("comedi: error: ran out of minor numbers "
+			  "for board device files.\n");
+		return -EBUSY;
+	}
+	info->device->minor = i;
+	csdev = COMEDI_DEVICE_CREATE(comedi_class, NULL,
+				     MKDEV(COMEDI_MAJOR, i), NULL,
+				     hardware_device, "comedi%i", i);
+	if (!IS_ERR(csdev))
+		info->device->class_dev = csdev;
+
+	return i;
+}
+
+void comedi_free_board_minor(unsigned minor)
+{
+	unsigned long flags;
+	struct comedi_device_file_info *info;
+
+	BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
+	comedi_spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+	info = comedi_file_info_table[minor];
+	comedi_file_info_table[minor] = NULL;
+	comedi_spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
+
+	if (info) {
+		comedi_device *dev = info->device;
+		if (dev) {
+			if (dev->class_dev) {
+				device_destroy(comedi_class,
+					       MKDEV(COMEDI_MAJOR, dev->minor));
+			}
+			comedi_device_cleanup(dev);
+			kfree(dev);
+		}
+		kfree(info);
+	}
+}
+
+int comedi_alloc_subdevice_minor(comedi_device *dev, comedi_subdevice *s)
+{
+	unsigned long flags;
+	struct comedi_device_file_info *info;
+	device_create_result_type *csdev;
+	unsigned i;
+
+	info = kmalloc(sizeof(struct comedi_device_file_info), GFP_KERNEL);
+	if (info == NULL)
+		return -ENOMEM;
+	info->device = dev;
+	info->read_subdevice = s;
+	info->write_subdevice = s;
+	comedi_spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+	for (i = COMEDI_FIRST_SUBDEVICE_MINOR; i < COMEDI_NUM_MINORS; ++i) {
+		if (comedi_file_info_table[i] == NULL) {
+			comedi_file_info_table[i] = info;
+			break;
+		}
+	}
+	comedi_spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
+	if (i == COMEDI_NUM_MINORS) {
+		kfree(info);
+		rt_printk("comedi: error: ran out of minor numbers "
+			  "for subdevice files.\n");
+		return -EBUSY;
+	}
+	s->minor = i;
+	csdev = COMEDI_DEVICE_CREATE(comedi_class, dev->class_dev,
+				     MKDEV(COMEDI_MAJOR, i), NULL, NULL,
+				     "comedi%i_subd%i", dev->minor,
+				     (int)(s - dev->subdevices));
+	if (!IS_ERR(csdev))
+		s->class_dev = csdev;
+
+	return i;
+}
+
+void comedi_free_subdevice_minor(comedi_subdevice *s)
+{
+	unsigned long flags;
+	struct comedi_device_file_info *info;
+
+	if (s == NULL)
+		return;
+	if (s->minor < 0)
+		return;
+
+	BUG_ON(s->minor >= COMEDI_NUM_MINORS);
+	BUG_ON(s->minor < COMEDI_FIRST_SUBDEVICE_MINOR);
+
+	comedi_spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+	info = comedi_file_info_table[s->minor];
+	comedi_file_info_table[s->minor] = NULL;
+	comedi_spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
+
+	if (s->class_dev) {
+		device_destroy(comedi_class, MKDEV(COMEDI_MAJOR, s->minor));
+		s->class_dev = NULL;
+	}
+	kfree(info);
+}
+
+struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor)
+{
+	unsigned long flags;
+	struct comedi_device_file_info *info;
+
+	BUG_ON(minor >= COMEDI_NUM_MINORS);
+	comedi_spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+	info = comedi_file_info_table[minor];
+	comedi_spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
+	return info;
+}

+ 8 - 0
drivers/staging/comedi/comedi_fops.h

@@ -0,0 +1,8 @@
+#ifndef _COMEDI_FOPS_H
+#define _COMEDI_FOPS_H
+
+extern struct class *comedi_class;
+extern const struct file_operations comedi_fops;
+
+#endif /* _COMEDI_FOPS_H */

+ 77 - 0
drivers/staging/comedi/comedi_ksyms.c

@@ -0,0 +1,77 @@
+/*
+    comedi/comedi_ksyms.c
+    exported comedi functions
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#define __NO_VERSION__
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include "comedidev.h"
+
+/* for drivers */
+EXPORT_SYMBOL(comedi_driver_register);
+EXPORT_SYMBOL(comedi_driver_unregister);
+/* EXPORT_SYMBOL(comedi_bufcheck); */
+/* EXPORT_SYMBOL(comedi_done); */
+/* EXPORT_SYMBOL(comedi_error_done); */
+EXPORT_SYMBOL(comedi_error);
+/* EXPORT_SYMBOL(comedi_eobuf); */
+/* EXPORT_SYMBOL(comedi_eos); */
+EXPORT_SYMBOL(comedi_event);
+EXPORT_SYMBOL(comedi_get_subdevice_runflags);
+EXPORT_SYMBOL(comedi_set_subdevice_runflags);
+EXPORT_SYMBOL(range_bipolar10);
+EXPORT_SYMBOL(range_bipolar5);
+EXPORT_SYMBOL(range_bipolar2_5);
+EXPORT_SYMBOL(range_unipolar10);
+EXPORT_SYMBOL(range_unipolar5);
+EXPORT_SYMBOL(range_unknown);
+#ifdef CONFIG_COMEDI_RT
+EXPORT_SYMBOL(comedi_free_irq);
+EXPORT_SYMBOL(comedi_request_irq);
+EXPORT_SYMBOL(comedi_switch_to_rt);
+EXPORT_SYMBOL(comedi_switch_to_non_rt);
+EXPORT_SYMBOL(rt_pend_call);
+#endif
+#ifdef CONFIG_COMEDI_DEBUG
+EXPORT_SYMBOL(comedi_debug);
+#endif
+EXPORT_SYMBOL_GPL(comedi_alloc_board_minor);
+EXPORT_SYMBOL_GPL(comedi_free_board_minor);
+EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
+EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
+
+/* for kcomedilib */
+EXPORT_SYMBOL(check_chanlist);
+EXPORT_SYMBOL_GPL(comedi_get_device_file_info);
+
+EXPORT_SYMBOL(comedi_buf_put);
+EXPORT_SYMBOL(comedi_buf_get);
+EXPORT_SYMBOL(comedi_buf_read_n_available);
+EXPORT_SYMBOL(comedi_buf_write_free);
+EXPORT_SYMBOL(comedi_buf_write_alloc);
+EXPORT_SYMBOL(comedi_buf_read_free);
+EXPORT_SYMBOL(comedi_buf_read_alloc);
+EXPORT_SYMBOL(comedi_buf_memcpy_to);
+EXPORT_SYMBOL(comedi_buf_memcpy_from);
+EXPORT_SYMBOL(comedi_reset_async_buf);

+ 150 - 0
drivers/staging/comedi/comedi_rt.h

@@ -0,0 +1,150 @@
+/*
+    module/comedi_rt.h
+    header file for real-time structures, variables, and constants
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _COMEDI_RT_H
+#define _COMEDI_RT_H
+
+#ifndef _COMEDIDEV_H
+#error comedi_rt.h should only be included by comedidev.h
+#endif
+
+#include <linux/version.h>
+#include <linux/kdev_t.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_COMEDI_RT
+
+#ifdef CONFIG_COMEDI_RTAI
+#include <rtai.h>
+#include <rtai_sched.h>
+#include <rtai_version.h>
+#endif
+#ifdef CONFIG_COMEDI_RTL
+#include <rtl_core.h>
+#include <rtl_time.h>
+/* #ifdef RTLINUX_VERSION_CODE */
+#include <rtl_sync.h>
+/* #endif */
+#define rt_printk rtl_printf
+#endif
+#ifdef CONFIG_COMEDI_FUSION
+#define rt_printk(format, args...) printk(format , ## args)
+#endif /* CONFIG_COMEDI_FUSION */
+#ifdef CONFIG_PRIORITY_IRQ
+#define rt_printk printk
+#endif
+
+int comedi_request_irq(unsigned int irq, irqreturn_t(*handler) (int,
+		void *PT_REGS_ARG), unsigned long flags, const char *device,
+		comedi_device *dev_id);
+void comedi_free_irq(unsigned int irq, comedi_device *dev_id);
+void comedi_rt_init(void);
+void comedi_rt_cleanup(void);
+int comedi_switch_to_rt(comedi_device *dev);
+void comedi_switch_to_non_rt(comedi_device *dev);
+void comedi_rt_pend_wakeup(wait_queue_head_t *q);
+extern int rt_pend_call(void (*func) (int arg1, void *arg2), int arg1,
+	void *arg2);
+
+#else
+
+#define comedi_request_irq(a, b, c, d, e) request_irq(a, b, c, d, e)
+#define comedi_free_irq(a, b) free_irq(a, b)
+#define comedi_rt_init() do {} while (0)
+#define comedi_rt_cleanup() do {} while (0)
+#define comedi_switch_to_rt(a) (-1)
+#define comedi_switch_to_non_rt(a) do {} while (0)
+#define comedi_rt_pend_wakeup(a) do {} while (0)
+
+#define rt_printk(format, args...)	printk(format, ##args)
+
+#endif
+
+/* Define a spin_lock_irqsave function that will work with rt or without.
+ * Use inline functions instead of just macros to enforce some type checking.
+ */
+#define comedi_spin_lock_irqsave(lock_ptr, flags) \
+	(flags = __comedi_spin_lock_irqsave(lock_ptr))
+
+static inline unsigned long __comedi_spin_lock_irqsave(spinlock_t *lock_ptr)
+{
+	unsigned long flags;
+
+#if defined(CONFIG_COMEDI_RTAI)
+	flags = rt_spin_lock_irqsave(lock_ptr);
+
+#elif defined(CONFIG_COMEDI_RTL)
+	rtl_spin_lock_irqsave(lock_ptr, flags);
+
+#elif defined(CONFIG_COMEDI_RTL_V1)
+	rtl_spin_lock_irqsave(lock_ptr, flags);
+
+#elif defined(CONFIG_COMEDI_FUSION)
+	rthal_spin_lock_irqsave(lock_ptr, flags);
+#else
+	spin_lock_irqsave(lock_ptr, flags);
+
+#endif
+
+	return flags;
+}
+
+static inline void comedi_spin_unlock_irqrestore(spinlock_t *lock_ptr,
+	unsigned long flags)
+{
+
+#if defined(CONFIG_COMEDI_RTAI)
+	rt_spin_unlock_irqrestore(flags, lock_ptr);
+
+#elif defined(CONFIG_COMEDI_RTL)
+	rtl_spin_unlock_irqrestore(lock_ptr, flags);
+
+#elif defined(CONFIG_COMEDI_RTL_V1)
+	rtl_spin_unlock_irqrestore(lock_ptr, flags);
+#elif defined(CONFIG_COMEDI_FUSION)
+	rthal_spin_unlock_irqrestore(lock_ptr, flags);
+#else
+	spin_unlock_irqrestore(lock_ptr, flags);
+
+#endif
+
+}
+
+/* define a RT safe udelay */
+static inline void comedi_udelay(unsigned int usec)
+{
+#if defined(CONFIG_COMEDI_RTAI)
+	static const int nanosec_per_usec = 1000;
+	rt_busy_sleep(usec * nanosec_per_usec);
+#elif defined(CONFIG_COMEDI_RTL)
+	static const int nanosec_per_usec = 1000;
+	rtl_delay(usec * nanosec_per_usec);
+#else
+	udelay(usec);
+#endif
+}
+
+#endif
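
The point of this header is that one driver source compiles against RTAI,
RTL, Fusion, or a plain kernel, with comedi_spin_lock_irqsave() expanding
to whichever flavor is configured. A hedged sketch of the wrappers in a
driver interrupt handler (the handler name is an assumption, not comedi
API; dev->spinlock is the field from comedi_device_struct below):

static irqreturn_t mydrv_interrupt(int irq, void *d)
{
	comedi_device *dev = d;
	unsigned long flags;

	comedi_spin_lock_irqsave(&dev->spinlock, flags);
	/* read hardware status, push samples, etc. */
	comedi_spin_unlock_irqrestore(&dev->spinlock, flags);
	return IRQ_HANDLED;
}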

+ 537 - 0
drivers/staging/comedi/comedidev.h

@@ -0,0 +1,537 @@
+/*
+    include/linux/comedidev.h
+    header file for kernel-only structures, variables, and constants
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _COMEDIDEV_H
+#define _COMEDIDEV_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kdev_t.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include "interrupt.h"
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "comedi.h"
+
+#define DPRINTK(format, args...)	do {		\
+	if (comedi_debug)				\
+		printk(KERN_DEBUG "comedi: " format , ## args);	\
+} while (0)
+
+#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, COMEDI_MINORVERSION, COMEDI_MICROVERSION)
+#define COMEDI_RELEASE VERSION
+
+#define COMEDI_INITCLEANUP_NOMODULE(x)					\
+	static int __init x ## _init_module(void)			\
+		{return comedi_driver_register(&(x));}			\
+	static void __exit x ## _cleanup_module(void)			\
+		{comedi_driver_unregister(&(x));}			\
+	module_init(x ## _init_module);					\
+	module_exit(x ## _cleanup_module);
+
+#define COMEDI_MODULE_MACROS						\
+	MODULE_AUTHOR("Comedi http://www.comedi.org");			\
+	MODULE_DESCRIPTION("Comedi low-level driver");			\
+	MODULE_LICENSE("GPL");
+
+#define COMEDI_INITCLEANUP(x)						\
+	COMEDI_MODULE_MACROS		\
+	COMEDI_INITCLEANUP_NOMODULE(x)
+
+#define COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table) \
+	static int __devinit comedi_driver ## _pci_probe(struct pci_dev *dev, \
+		const struct pci_device_id *ent) \
+	{ \
+		return comedi_pci_auto_config(dev, comedi_driver.driver_name); \
+	} \
+	static void __devexit comedi_driver ## _pci_remove(struct pci_dev *dev) \
+	{ \
+		comedi_pci_auto_unconfig(dev); \
+	} \
+	static struct pci_driver comedi_driver ## _pci_driver = \
+	{ \
+		.id_table = pci_id_table, \
+		.probe = &comedi_driver ## _pci_probe, \
+		.remove = __devexit_p(&comedi_driver ## _pci_remove) \
+	}; \
+	static int __init comedi_driver ## _init_module(void) \
+	{ \
+		int retval; \
+		retval = comedi_driver_register(&comedi_driver); \
+		if (retval < 0) \
+			return retval; \
+		comedi_driver ## _pci_driver.name = (char *)comedi_driver.driver_name; \
+		return pci_register_driver(&comedi_driver ## _pci_driver); \
+	} \
+	static void __exit comedi_driver ## _cleanup_module(void) \
+	{ \
+		pci_unregister_driver(&comedi_driver ## _pci_driver); \
+		comedi_driver_unregister(&comedi_driver); \
+	} \
+	module_init(comedi_driver ## _init_module); \
+	module_exit(comedi_driver ## _cleanup_module);
+
+#define COMEDI_PCI_INITCLEANUP(comedi_driver, pci_id_table) \
+	COMEDI_MODULE_MACROS \
+	COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table)
+
+#define PCI_VENDOR_ID_INOVA		0x104c
+#define PCI_VENDOR_ID_NATINST		0x1093
+#define PCI_VENDOR_ID_DATX		0x1116
+#define PCI_VENDOR_ID_COMPUTERBOARDS	0x1307
+#define PCI_VENDOR_ID_ADVANTECH		0x13fe
+#define PCI_VENDOR_ID_RTD		0x1435
+#define PCI_VENDOR_ID_AMPLICON		0x14dc
+#define PCI_VENDOR_ID_ADLINK		0x144a
+#define PCI_VENDOR_ID_ICP		0x104c
+#define PCI_VENDOR_ID_CONTEC		0x1221
+#define PCI_VENDOR_ID_MEILHAUS		0x1402
+
+#define COMEDI_NUM_MINORS 0x100
+#define COMEDI_NUM_LEGACY_MINORS 0x10
+#define COMEDI_NUM_BOARD_MINORS 0x30
+#define COMEDI_FIRST_SUBDEVICE_MINOR COMEDI_NUM_BOARD_MINORS
+
+typedef struct comedi_device_struct comedi_device;
+typedef struct comedi_subdevice_struct comedi_subdevice;
+typedef struct comedi_async_struct comedi_async;
+typedef struct comedi_driver_struct comedi_driver;
+typedef struct comedi_lrange_struct comedi_lrange;
+
+typedef struct device device_create_result_type;
+
+#define COMEDI_DEVICE_CREATE(cs, parent, devt, drvdata, device, fmt...) \
+	device_create(cs, ((parent) ? (parent) : (device)), devt, drvdata, fmt)
+
+struct comedi_subdevice_struct {
+	comedi_device *device;
+	int type;
+	int n_chan;
+	volatile int subdev_flags;
+	int len_chanlist;	/* maximum length of channel/gain list */
+
+	void *private;
+
+	comedi_async *async;
+
+	void *lock;
+	void *busy;
+	unsigned runflags;
+	spinlock_t spin_lock;
+
+	int io_bits;
+
+	lsampl_t maxdata;	/* if maxdata==0, use list */
+	const lsampl_t *maxdata_list;	/* list is channel specific */
+
+	unsigned int flags;
+	const unsigned int *flaglist;
+
+	unsigned int settling_time_0;
+
+	const comedi_lrange *range_table;
+	const comedi_lrange *const *range_table_list;
+
+	unsigned int *chanlist;	/* driver-owned chanlist (not used) */
+
+	int (*insn_read) (comedi_device *, comedi_subdevice *, comedi_insn *,
+		lsampl_t *);
+	int (*insn_write) (comedi_device *, comedi_subdevice *, comedi_insn *,
+		lsampl_t *);
+	int (*insn_bits) (comedi_device *, comedi_subdevice *, comedi_insn *,
+		lsampl_t *);
+	int (*insn_config) (comedi_device *, comedi_subdevice *, comedi_insn *,
+		lsampl_t *);
+
+	int (*do_cmd) (comedi_device *, comedi_subdevice *);
+	int (*do_cmdtest) (comedi_device *, comedi_subdevice *, comedi_cmd *);
+	int (*poll) (comedi_device *, comedi_subdevice *);
+	int (*cancel) (comedi_device *, comedi_subdevice *);
+	/* int (*do_lock)(comedi_device *,comedi_subdevice *); */
+	/* int (*do_unlock)(comedi_device *,comedi_subdevice *); */
+
+	/* called when the buffer changes */
+	int (*buf_change) (comedi_device *dev, comedi_subdevice *s,
+		unsigned long new_size);
+
+	void (*munge) (comedi_device *dev, comedi_subdevice *s, void *data,
+		unsigned int num_bytes, unsigned int start_chan_index);
+	enum dma_data_direction async_dma_dir;
+
+	unsigned int state;
+
+	device_create_result_type *class_dev;
+	int minor;
+};
+
+struct comedi_buf_page {
+	void *virt_addr;
+	dma_addr_t dma_addr;
+};
+
+struct comedi_async_struct {
+	comedi_subdevice *subdevice;
+
+	void *prealloc_buf;	/* pre-allocated buffer */
+	unsigned int prealloc_bufsz;	/* buffer size, in bytes */
+	struct comedi_buf_page *buf_page_list;	/* virtual and dma address of each page */
+	unsigned n_buf_pages;	/* num elements in buf_page_list */
+
+	unsigned int max_bufsize;	/* maximum buffer size, bytes */
+	unsigned int mmap_count;	/* current number of mmaps of prealloc_buf */
+
+	unsigned int buf_write_count;	/* byte count for writer (write completed) */
+	unsigned int buf_write_alloc_count;	/* byte count for writer (allocated for writing) */
+	unsigned int buf_read_count;	/* byte count for reader (read completed) */
+	unsigned int buf_read_alloc_count;	/* byte count for reader (allocated for reading) */
+
+	unsigned int buf_write_ptr;	/* buffer marker for writer */
+	unsigned int buf_read_ptr;	/* buffer marker for reader */
+
+	unsigned int cur_chan;	/* useless channel marker for interrupt */
+	/* number of bytes that have been received for current scan */
+	unsigned int scan_progress;
+	/* keeps track of where we are in chanlist as for munging */
+	unsigned int munge_chan;
+	/* number of bytes that have been munged */
+	unsigned int munge_count;
+	/* buffer marker for munging */
+	unsigned int munge_ptr;
+
+	unsigned int events;	/* events that have occurred */
+
+	comedi_cmd cmd;
+
+	wait_queue_head_t wait_head;
+
+	/* callback stuff */
+	unsigned int cb_mask;
+	int (*cb_func) (unsigned int flags, void *);
+	void *cb_arg;
+
+	int (*inttrig) (comedi_device *dev, comedi_subdevice *s,
+			unsigned int x);
+};
+
+struct comedi_driver_struct {
+	struct comedi_driver_struct *next;
+
+	const char *driver_name;
+	struct module *module;
+	int (*attach) (comedi_device *, comedi_devconfig *);
+	int (*detach) (comedi_device *);
+
+	/* number of elements in board_name and board_id arrays */
+	unsigned int num_names;
+	const char *const *board_name;
+	/* offset in bytes from one board name pointer to the next */
+	int offset;
+};
+
+struct comedi_device_struct {
+	int use_count;
+	comedi_driver *driver;
+	void *private;
+
+	device_create_result_type *class_dev;
+	int minor;
+	/* hw_dev is passed to dma_alloc_coherent when allocating async buffers
+	 * for subdevices that have async_dma_dir set to something other than
+	 * DMA_NONE */
+	struct device *hw_dev;
+
+	const char *board_name;
+	const void *board_ptr;
+	int attached;
+	int rt;
+	spinlock_t spinlock;
+	struct mutex mutex;
+	int in_request_module;
+
+	int n_subdevices;
+	comedi_subdevice *subdevices;
+
+	/* dumb */
+	unsigned long iobase;
+	unsigned int irq;
+
+	comedi_subdevice *read_subdev;
+	comedi_subdevice *write_subdev;
+
+	struct fasync_struct *async_queue;
+
+	void (*open) (comedi_device *dev);
+	void (*close) (comedi_device *dev);
+};
+
+struct comedi_device_file_info {
+	comedi_device *device;
+	comedi_subdevice *read_subdevice;
+	comedi_subdevice *write_subdevice;
+};
+
+#ifdef CONFIG_COMEDI_DEBUG
+extern int comedi_debug;
+#else
+static const int comedi_debug;
+#endif
+
+/*
+ * function prototypes
+ */
+
+void comedi_event(comedi_device *dev, comedi_subdevice *s);
+void comedi_error(const comedi_device *dev, const char *s);
+
+/* we can expand the number of bits used to encode devices/subdevices into
+ the minor number soon, after more distros support > 8 bit minor numbers
+ (like after Debian Etch gets released) */
+enum comedi_minor_bits {
+	COMEDI_DEVICE_MINOR_MASK = 0xf,
+	COMEDI_SUBDEVICE_MINOR_MASK = 0xf0
+};
+static const unsigned COMEDI_SUBDEVICE_MINOR_SHIFT = 4;
+static const unsigned COMEDI_SUBDEVICE_MINOR_OFFSET = 1;
+
+struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor);
+
+static inline comedi_subdevice *comedi_get_read_subdevice(
+				const struct comedi_device_file_info *info)
+{
+	if (info->read_subdevice)
+		return info->read_subdevice;
+	if (info->device == NULL)
+		return NULL;
+	return info->device->read_subdev;
+}
+
+static inline comedi_subdevice *comedi_get_write_subdevice(
+				const struct comedi_device_file_info *info)
+{
+	if (info->write_subdevice)
+		return info->write_subdevice;
+	if (info->device == NULL)
+		return NULL;
+	return info->device->write_subdev;
+}
+
+void comedi_device_detach(comedi_device *dev);
+int comedi_device_attach(comedi_device *dev, comedi_devconfig *it);
+int comedi_driver_register(comedi_driver *);
+int comedi_driver_unregister(comedi_driver *);
+
+void init_polling(void);
+void cleanup_polling(void);
+void start_polling(comedi_device *);
+void stop_polling(comedi_device *);
+
+int comedi_buf_alloc(comedi_device *dev, comedi_subdevice *s, unsigned long
+	new_size);
+
+#ifdef CONFIG_PROC_FS
+void comedi_proc_init(void);
+void comedi_proc_cleanup(void);
+#else
+static inline void comedi_proc_init(void)
+{
+}
+static inline void comedi_proc_cleanup(void)
+{
+}
+#endif
+
+/* subdevice runflags */
+enum subdevice_runflags {
+	SRF_USER = 0x00000001,
+	SRF_RT = 0x00000002,
+	/* indicates a COMEDI_CB_ERROR event has occurred since the last
+	 * command was started */
+	SRF_ERROR = 0x00000004,
+	SRF_RUNNING = 0x08000000
+};
+
+/*
+   various internal comedi functions
+ */
+
+int do_rangeinfo_ioctl(comedi_device *dev, comedi_rangeinfo *arg);
+int check_chanlist(comedi_subdevice *s, int n, unsigned int *chanlist);
+void comedi_set_subdevice_runflags(comedi_subdevice *s, unsigned mask,
+	unsigned bits);
+unsigned comedi_get_subdevice_runflags(comedi_subdevice *s);
+int insn_inval(comedi_device *dev, comedi_subdevice *s,
+	comedi_insn *insn, lsampl_t *data);
+
+/* range stuff */
+
+#define RANGE(a, b)		{(a)*1e6, (b)*1e6, 0}
+#define RANGE_ext(a, b)		{(a)*1e6, (b)*1e6, RF_EXTERNAL}
+#define RANGE_mA(a, b)		{(a)*1e6, (b)*1e6, UNIT_mA}
+#define RANGE_unitless(a, b)	{(a)*1e6, (b)*1e6, 0}	/* XXX */
+#define BIP_RANGE(a)		{-(a)*1e6, (a)*1e6, 0}
+#define UNI_RANGE(a)		{0, (a)*1e6, 0}
+
+extern const comedi_lrange range_bipolar10;
+extern const comedi_lrange range_bipolar5;
+extern const comedi_lrange range_bipolar2_5;
+extern const comedi_lrange range_unipolar10;
+extern const comedi_lrange range_unipolar5;
+extern const comedi_lrange range_unknown;
+
+#define range_digital		range_unipolar5
+
+#if __GNUC__ >= 3
+#define GCC_ZERO_LENGTH_ARRAY
+#else
+#define GCC_ZERO_LENGTH_ARRAY 0
+#endif
+
+struct comedi_lrange_struct {
+	int length;
+	comedi_krange range[GCC_ZERO_LENGTH_ARRAY];
+};
+
+/* some silly little inline functions */
+
+static inline int alloc_subdevices(comedi_device *dev,
+				   unsigned int num_subdevices)
+{
+	unsigned i;
+
+	dev->n_subdevices = num_subdevices;
+	dev->subdevices =
+		kcalloc(num_subdevices, sizeof(comedi_subdevice), GFP_KERNEL);
+	if (!dev->subdevices)
+		return -ENOMEM;
+	for (i = 0; i < num_subdevices; ++i) {
+		dev->subdevices[i].device = dev;
+		dev->subdevices[i].async_dma_dir = DMA_NONE;
+		spin_lock_init(&dev->subdevices[i].spin_lock);
+		dev->subdevices[i].minor = -1;
+	}
+	return 0;
+}
+
+static inline int alloc_private(comedi_device *dev, int size)
+{
+	dev->private = kzalloc(size, GFP_KERNEL);
+	if (!dev->private)
+		return -ENOMEM;
+	return 0;
+}
+
+static inline unsigned int bytes_per_sample(const comedi_subdevice *subd)
+{
+	if (subd->subdev_flags & SDF_LSAMPL)
+		return sizeof(lsampl_t);
+	else
+		return sizeof(sampl_t);
+}
+
+/* must be used in attach to set dev->hw_dev if you wish to DMA directly
+ * into comedi's buffer */
+static inline void comedi_set_hw_dev(comedi_device *dev, struct device *hw_dev)
+{
+	if (dev->hw_dev)
+		put_device(dev->hw_dev);
+
+	dev->hw_dev = hw_dev;
+	if (dev->hw_dev) {
+		dev->hw_dev = get_device(dev->hw_dev);
+		BUG_ON(dev->hw_dev == NULL);
+	}
+}
+
+int comedi_buf_put(comedi_async *async, sampl_t x);
+int comedi_buf_get(comedi_async *async, sampl_t *x);
+
+unsigned int comedi_buf_write_n_available(comedi_async *async);
+unsigned int comedi_buf_write_alloc(comedi_async *async, unsigned int nbytes);
+unsigned int comedi_buf_write_alloc_strict(comedi_async *async,
+	unsigned int nbytes);
+unsigned comedi_buf_write_free(comedi_async *async, unsigned int nbytes);
+unsigned comedi_buf_read_alloc(comedi_async *async, unsigned nbytes);
+unsigned comedi_buf_read_free(comedi_async *async, unsigned int nbytes);
+unsigned int comedi_buf_read_n_available(comedi_async *async);
+void comedi_buf_memcpy_to(comedi_async *async, unsigned int offset,
+	const void *source, unsigned int num_bytes);
+void comedi_buf_memcpy_from(comedi_async *async, unsigned int offset,
+	void *destination, unsigned int num_bytes);
+static inline unsigned comedi_buf_write_n_allocated(comedi_async *async)
+{
+	return async->buf_write_alloc_count - async->buf_write_count;
+}
+static inline unsigned comedi_buf_read_n_allocated(comedi_async *async)
+{
+	return async->buf_read_alloc_count - async->buf_read_count;
+}
+
+void comedi_reset_async_buf(comedi_async *async);
+
+static inline void *comedi_aux_data(int options[], int n)
+{
+	unsigned long address;
+	unsigned long addressLow;
+	int bit_shift;
+	if (sizeof(int) >= sizeof(void *))
+		address = options[COMEDI_DEVCONF_AUX_DATA_LO];
+	else {
+		address = options[COMEDI_DEVCONF_AUX_DATA_HI];
+		bit_shift = sizeof(int) * 8;
+		address <<= bit_shift;
+		addressLow = options[COMEDI_DEVCONF_AUX_DATA_LO];
+		addressLow &= (1UL << bit_shift) - 1;
+		address |= addressLow;
+	}
+	if (n >= 1)
+		address += options[COMEDI_DEVCONF_AUX_DATA0_LENGTH];
+	if (n >= 2)
+		address += options[COMEDI_DEVCONF_AUX_DATA1_LENGTH];
+	if (n >= 3)
+		address += options[COMEDI_DEVCONF_AUX_DATA2_LENGTH];
+	BUG_ON(n > 3);
+	return (void *)address;
+}
+
+int comedi_alloc_board_minor(struct device *hardware_device);
+void comedi_free_board_minor(unsigned minor);
+int comedi_alloc_subdevice_minor(comedi_device *dev, comedi_subdevice *s);
+void comedi_free_subdevice_minor(comedi_subdevice *s);
+int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name);
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
+
+#include "comedi_rt.h"
+
+#endif /* _COMEDIDEV_H */
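
Everything a low-level driver must supply is visible in this header: fill
in a comedi_driver_struct, populate subdevices in attach(), and let
COMEDI_INITCLEANUP() generate the module glue. A minimal sketch of that
shape, assuming the usual COMEDI_SUBD_DIO / SDF_* constants from comedi.h;
all "skel" names are hypothetical:

static int skel_attach(comedi_device *dev, comedi_devconfig *it)
{
	comedi_subdevice *s;

	dev->board_name = "skel";
	if (alloc_subdevices(dev, 1) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	/* s->insn_bits = skel_dio_insn_bits;  -- the real I/O hook */
	return 0;
}

static int skel_detach(comedi_device *dev)
{
	/* release regions, free IRQs, etc. */
	return 0;
}

static comedi_driver driver_skel = {
	.driver_name	= "comedi_skel",
	.module		= THIS_MODULE,
	.attach		= skel_attach,
	.detach		= skel_detach,
};

COMEDI_INITCLEANUP(driver_skel);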

+ 192 - 0
drivers/staging/comedi/comedilib.h

@@ -0,0 +1,192 @@
+/*
+    linux/include/comedilib.h
+    header file for kcomedilib
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _LINUX_COMEDILIB_H
+#define _LINUX_COMEDILIB_H
+
+#include "comedi.h"
+
+/* Kernel internal stuff.  Needed by real-time modules and such. */
+
+#ifndef __KERNEL__
+#error linux/comedilib.h should not be included by non-kernel-space code
+#endif
+
+/* exported functions */
+
+#ifndef KCOMEDILIB_DEPRECATED
+
+typedef void comedi_t;
+
+/* these functions may not be called at real-time priority */
+
+comedi_t *comedi_open(const char *path);
+int comedi_close(comedi_t *dev);
+
+/* these functions may be called at any priority, but may fail at
+   real-time priority */
+
+int comedi_lock(comedi_t *dev, unsigned int subdev);
+int comedi_unlock(comedi_t *dev, unsigned int subdev);
+
+/* these functions may be called at any priority, but you must hold
+   the lock for the subdevice */
+
+int comedi_loglevel(int loglevel);
+void comedi_perror(const char *s);
+char *comedi_strerror(int errnum);
+int comedi_errno(void);
+int comedi_fileno(comedi_t *dev);
+
+int comedi_cancel(comedi_t *dev, unsigned int subdev);
+int comedi_register_callback(comedi_t *dev, unsigned int subdev,
+	unsigned int mask, int (*cb) (unsigned int, void *), void *arg);
+
+int comedi_command(comedi_t *dev, comedi_cmd *cmd);
+int comedi_command_test(comedi_t *dev, comedi_cmd *cmd);
+int comedi_trigger(comedi_t *dev, unsigned int subdev, comedi_trig *it);
+int __comedi_trigger(comedi_t *dev, unsigned int subdev, comedi_trig *it);
+int comedi_data_write(comedi_t *dev, unsigned int subdev, unsigned int chan,
+	unsigned int range, unsigned int aref, lsampl_t data);
+int comedi_data_read(comedi_t *dev, unsigned int subdev, unsigned int chan,
+	unsigned int range, unsigned int aref, lsampl_t *data);
+int comedi_data_read_hint(comedi_t *dev, unsigned int subdev,
+	unsigned int chan, unsigned int range, unsigned int aref);
+int comedi_data_read_delayed(comedi_t *dev, unsigned int subdev,
+	unsigned int chan, unsigned int range, unsigned int aref,
+	lsampl_t *data, unsigned int nano_sec);
+int comedi_dio_config(comedi_t *dev, unsigned int subdev, unsigned int chan,
+	unsigned int io);
+int comedi_dio_read(comedi_t *dev, unsigned int subdev, unsigned int chan,
+	unsigned int *val);
+int comedi_dio_write(comedi_t *dev, unsigned int subdev, unsigned int chan,
+	unsigned int val);
+int comedi_dio_bitfield(comedi_t *dev, unsigned int subdev, unsigned int mask,
+	unsigned int *bits);
+int comedi_get_n_subdevices(comedi_t *dev);
+int comedi_get_version_code(comedi_t *dev);
+const char *comedi_get_driver_name(comedi_t *dev);
+const char *comedi_get_board_name(comedi_t *dev);
+int comedi_get_subdevice_type(comedi_t *dev, unsigned int subdevice);
+int comedi_find_subdevice_by_type(comedi_t *dev, int type, unsigned int subd);
+int comedi_get_n_channels(comedi_t *dev, unsigned int subdevice);
+lsampl_t comedi_get_maxdata(comedi_t *dev, unsigned int subdevice, unsigned
+	int chan);
+int comedi_get_n_ranges(comedi_t *dev, unsigned int subdevice, unsigned int
+	chan);
+int comedi_do_insn(comedi_t *dev, comedi_insn *insn);
+int comedi_poll(comedi_t *dev, unsigned int subdev);
+
+/* DEPRECATED functions */
+int comedi_get_rangetype(comedi_t *dev, unsigned int subdevice,
+	unsigned int chan);
+
+/* ALPHA functions */
+unsigned int comedi_get_subdevice_flags(comedi_t *dev, unsigned int subdevice);
+int comedi_get_len_chanlist(comedi_t *dev, unsigned int subdevice);
+int comedi_get_krange(comedi_t *dev, unsigned int subdevice, unsigned int
+	chan, unsigned int range, comedi_krange *krange);
+unsigned int comedi_get_buf_head_pos(comedi_t *dev, unsigned int subdevice);
+int comedi_set_user_int_count(comedi_t *dev, unsigned int subdevice,
+	unsigned int buf_user_count);
+int comedi_map(comedi_t *dev, unsigned int subdev, void *ptr);
+int comedi_unmap(comedi_t *dev, unsigned int subdev);
+int comedi_get_buffer_size(comedi_t *dev, unsigned int subdev);
+int comedi_mark_buffer_read(comedi_t *dev, unsigned int subdevice,
+	unsigned int num_bytes);
+int comedi_mark_buffer_written(comedi_t *d, unsigned int subdevice,
+	unsigned int num_bytes);
+int comedi_get_buffer_contents(comedi_t *dev, unsigned int subdevice);
+int comedi_get_buffer_offset(comedi_t *dev, unsigned int subdevice);
+
+#else
+
+/* these functions may not be called at real-time priority */
+
+int comedi_open(unsigned int minor);
+void comedi_close(unsigned int minor);
+
+/* these functions may be called at any priority, but may fail at
+   real-time priority */
+
+int comedi_lock(unsigned int minor, unsigned int subdev);
+int comedi_unlock(unsigned int minor, unsigned int subdev);
+
+/* these functions may be called at any priority, but you must hold
+   the lock for the subdevice */
+
+int comedi_cancel(unsigned int minor, unsigned int subdev);
+int comedi_register_callback(unsigned int minor, unsigned int subdev,
+	unsigned int mask, int (*cb) (unsigned int, void *), void *arg);
+
+int comedi_command(unsigned int minor, comedi_cmd *cmd);
+int comedi_command_test(unsigned int minor, comedi_cmd *cmd);
+int comedi_trigger(unsigned int minor, unsigned int subdev, comedi_trig *it);
+int __comedi_trigger(unsigned int minor, unsigned int subdev, comedi_trig *it);
+int comedi_data_write(unsigned int dev, unsigned int subdev, unsigned int chan,
+	unsigned int range, unsigned int aref, lsampl_t data);
+int comedi_data_read(unsigned int dev, unsigned int subdev, unsigned int chan,
+	unsigned int range, unsigned int aref, lsampl_t *data);
+int comedi_dio_config(unsigned int dev, unsigned int subdev, unsigned int chan,
+	unsigned int io);
+int comedi_dio_read(unsigned int dev, unsigned int subdev, unsigned int chan,
+	unsigned int *val);
+int comedi_dio_write(unsigned int dev, unsigned int subdev, unsigned int chan,
+	unsigned int val);
+int comedi_dio_bitfield(unsigned int dev, unsigned int subdev,
+	unsigned int mask, unsigned int *bits);
+int comedi_get_n_subdevices(unsigned int dev);
+int comedi_get_version_code(unsigned int dev);
+char *comedi_get_driver_name(unsigned int dev);
+char *comedi_get_board_name(unsigned int minor);
+int comedi_get_subdevice_type(unsigned int minor, unsigned int subdevice);
+int comedi_find_subdevice_by_type(unsigned int minor, int type,
+	unsigned int subd);
+int comedi_get_n_channels(unsigned int minor, unsigned int subdevice);
+lsampl_t comedi_get_maxdata(unsigned int minor, unsigned int subdevice, unsigned
+	int chan);
+int comedi_get_n_ranges(unsigned int minor, unsigned int subdevice, unsigned int
+	chan);
+int comedi_do_insn(unsigned int minor, comedi_insn *insn);
+int comedi_poll(unsigned int minor, unsigned int subdev);
+
+/* DEPRECATED functions */
+int comedi_get_rangetype(unsigned int minor, unsigned int subdevice,
+	unsigned int chan);
+
+/* ALPHA functions */
+unsigned int comedi_get_subdevice_flags(unsigned int minor, unsigned int
+	subdevice);
+int comedi_get_len_chanlist(unsigned int minor, unsigned int subdevice);
+int comedi_get_krange(unsigned int minor, unsigned int subdevice, unsigned int
+	chan, unsigned int range, comedi_krange *krange);
+unsigned int comedi_get_buf_head_pos(unsigned int minor, unsigned int
+	subdevice);
+int comedi_set_user_int_count(unsigned int minor, unsigned int subdevice,
+	unsigned int buf_user_count);
+int comedi_map(unsigned int minor, unsigned int subdev, void **ptr);
+int comedi_unmap(unsigned int minor, unsigned int subdev);
+
+#endif
+
+#endif
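
Both halves of this API express the same idea, so a short sketch may help:
in the default (non-deprecated) form, a kernel-space client addresses a
device by path and gets an opaque comedi_t handle back. The device path and
the subdevice/channel/range numbers below are illustrative assumptions;
AREF_GROUND comes from comedi.h.

static int read_one_sample(lsampl_t *value)
{
	comedi_t *dev;
	int ret;

	dev = comedi_open("/dev/comedi0");
	if (dev == NULL)
		return -ENODEV;

	/* subdevice 0, channel 0, range 0, referenced to ground */
	ret = comedi_data_read(dev, 0, 0, 0, AREF_GROUND, value);

	comedi_close(dev);
	return ret;
}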

+ 846 - 0
drivers/staging/comedi/drivers.c

@@ -0,0 +1,846 @@
+/*
+    module/drivers.c
+    functions for manipulating drivers
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#define _GNU_SOURCE
+
+#define __NO_VERSION__
+#include "comedi_fops.h"
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include "comedidev.h"
+#include "wrapper.h"
+#include <linux/highmem.h>	/* for SuSE brokenness */
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+static int postconfig(comedi_device * dev);
+static int insn_rw_emulate_bits(comedi_device * dev, comedi_subdevice * s,
+	comedi_insn * insn, lsampl_t * data);
+static void *comedi_recognize(comedi_driver * driv, const char *name);
+static void comedi_report_boards(comedi_driver * driv);
+static int poll_invalid(comedi_device * dev, comedi_subdevice * s);
+int comedi_buf_alloc(comedi_device * dev, comedi_subdevice * s,
+	unsigned long new_size);
+
+comedi_driver *comedi_drivers;
+
+int comedi_modprobe(int minor)
+{
+	return -EINVAL;
+}
+
+static void cleanup_device(comedi_device * dev)
+{
+	int i;
+	comedi_subdevice *s;
+
+	if (dev->subdevices) {
+		for (i = 0; i < dev->n_subdevices; i++) {
+			s = dev->subdevices + i;
+			comedi_free_subdevice_minor(s);
+			if (s->async) {
+				comedi_buf_alloc(dev, s, 0);
+				kfree(s->async);
+			}
+		}
+		kfree(dev->subdevices);
+		dev->subdevices = NULL;
+		dev->n_subdevices = 0;
+	}
+	if (dev->private) {
+		kfree(dev->private);
+		dev->private = NULL;
+	}
+	dev->driver = NULL;
+	dev->board_name = NULL;
+	dev->board_ptr = NULL;
+	dev->iobase = 0;
+	dev->irq = 0;
+	dev->read_subdev = NULL;
+	dev->write_subdev = NULL;
+	dev->open = NULL;
+	dev->close = NULL;
+	comedi_set_hw_dev(dev, NULL);
+}
+
+static void __comedi_device_detach(comedi_device * dev)
+{
+	dev->attached = 0;
+	if (dev->driver) {
+		dev->driver->detach(dev);
+	} else {
+		printk("BUG: dev->driver=NULL in comedi_device_detach()\n");
+	}
+	cleanup_device(dev);
+}
+
+void comedi_device_detach(comedi_device * dev)
+{
+	if (!dev->attached)
+		return;
+	__comedi_device_detach(dev);
+}
+
+int comedi_device_attach(comedi_device * dev, comedi_devconfig * it)
+{
+	comedi_driver *driv;
+	int ret;
+
+	if (dev->attached)
+		return -EBUSY;
+
+	for (driv = comedi_drivers; driv; driv = driv->next) {
+		if (!try_module_get(driv->module)) {
+			printk("comedi: failed to increment module count, skipping\n");
+			continue;
+		}
+		if (driv->num_names) {
+			dev->board_ptr = comedi_recognize(driv, it->board_name);
+			if (dev->board_ptr == NULL) {
+				module_put(driv->module);
+				continue;
+			}
+		} else {
+			if (strcmp(driv->driver_name, it->board_name)) {
+				module_put(driv->module);
+				continue;
+			}
+		}
+		/* initialize dev->driver here so comedi_error() can be
+		 * called from attach */
+		dev->driver = driv;
+		ret = driv->attach(dev, it);
+		if (ret < 0) {
+			module_put(dev->driver->module);
+			__comedi_device_detach(dev);
+			return ret;
+		}
+		goto attached;
+	}
+
+	/* recognition failed if we get here; report the valid board
+	 * names before returning an error */
+	for (driv = comedi_drivers; driv; driv = driv->next) {
+		if (!try_module_get(driv->module)) {
+			printk("comedi: failed to increment module count\n");
+			continue;
+		}
+		comedi_report_boards(driv);
+		module_put(driv->module);
+	}
+	return -EIO;
+
+attached:
+	/* do a little post-config cleanup */
+	ret = postconfig(dev);
+	module_put(dev->driver->module);
+	if (ret < 0) {
+		__comedi_device_detach(dev);
+		return ret;
+	}
+
+	if (!dev->board_name) {
+		printk("BUG: dev->board_name=<%p>\n", dev->board_name);
+		dev->board_name = "BUG";
+	}
+	smp_wmb();
+	dev->attached = 1;
+
+	return 0;
+}
+
+int comedi_driver_register(comedi_driver * driver)
+{
+	driver->next = comedi_drivers;
+	comedi_drivers = driver;
+
+	return 0;
+}
+
+int comedi_driver_unregister(comedi_driver * driver)
+{
+	comedi_driver *prev;
+	int i;
+
+	/* check for devices using this driver */
+	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
+		struct comedi_device_file_info *dev_file_info =
+			comedi_get_device_file_info(i);
+		comedi_device *dev;
+
+		if (dev_file_info == NULL)
+			continue;
+		dev = dev_file_info->device;
+
+		mutex_lock(&dev->mutex);
+		if (dev->attached && dev->driver == driver) {
+			if (dev->use_count)
+				printk("BUG! detaching device with use_count=%d\n", dev->use_count);
+			comedi_device_detach(dev);
+		}
+		mutex_unlock(&dev->mutex);
+	}
+
+	if (comedi_drivers == driver) {
+		comedi_drivers = driver->next;
+		return 0;
+	}
+
+	for (prev = comedi_drivers; prev->next; prev = prev->next) {
+		if (prev->next == driver) {
+			prev->next = driver->next;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int postconfig(comedi_device * dev)
+{
+	int i;
+	comedi_subdevice *s;
+	comedi_async *async = NULL;
+	int ret;
+
+	for (i = 0; i < dev->n_subdevices; i++) {
+		s = dev->subdevices + i;
+
+		if (s->type == COMEDI_SUBD_UNUSED)
+			continue;
+
+		if (s->len_chanlist == 0)
+			s->len_chanlist = 1;
+
+		if (s->do_cmd) {
+			BUG_ON((s->subdev_flags & (SDF_CMD_READ |
+				SDF_CMD_WRITE)) == 0);
+			BUG_ON(!s->do_cmdtest);
+
+			async = kzalloc(sizeof(comedi_async), GFP_KERNEL);
+			if (async == NULL) {
+				printk("failed to allocate async struct\n");
+				return -ENOMEM;
+			}
+			init_waitqueue_head(&async->wait_head);
+			async->subdevice = s;
+			s->async = async;
+
+#define DEFAULT_BUF_MAXSIZE (64*1024)
+#define DEFAULT_BUF_SIZE (64*1024)
+
+			async->max_bufsize = DEFAULT_BUF_MAXSIZE;
+
+			async->prealloc_buf = NULL;
+			async->prealloc_bufsz = 0;
+			if (comedi_buf_alloc(dev, s, DEFAULT_BUF_SIZE) < 0) {
+				printk("Buffer allocation failed\n");
+				return -ENOMEM;
+			}
+			if (s->buf_change) {
+				ret = s->buf_change(dev, s, DEFAULT_BUF_SIZE);
+				if (ret < 0)
+					return ret;
+			}
+			comedi_alloc_subdevice_minor(dev, s);
+		}
+
+		if (!s->range_table && !s->range_table_list)
+			s->range_table = &range_unknown;
+
+		if (!s->insn_read && s->insn_bits)
+			s->insn_read = insn_rw_emulate_bits;
+		if (!s->insn_write && s->insn_bits)
+			s->insn_write = insn_rw_emulate_bits;
+
+		if (!s->insn_read)
+			s->insn_read = insn_inval;
+		if (!s->insn_write)
+			s->insn_write = insn_inval;
+		if (!s->insn_bits)
+			s->insn_bits = insn_inval;
+		if (!s->insn_config)
+			s->insn_config = insn_inval;
+
+		if (!s->poll)
+			s->poll = poll_invalid;
+	}
+
+	return 0;
+}
+
+/* generic recognize function for drivers that register their supported
+ * board names */
+void *comedi_recognize(comedi_driver * driv, const char *name)
+{
+	unsigned i;
+	const char *const *name_ptr = driv->board_name;
+	for (i = 0; i < driv->num_names; i++) {
+		if (strcmp(*name_ptr, name) == 0)
+			return (void *)name_ptr;
+		name_ptr =
+			(const char *const *)((const char *)name_ptr +
+			driv->offset);
+	}
+
+	return NULL;
+}
+
+void comedi_report_boards(comedi_driver * driv)
+{
+	unsigned int i;
+	const char *const *name_ptr;
+
+	printk("comedi: valid board names for %s driver are:\n",
+		driv->driver_name);
+
+	name_ptr = driv->board_name;
+	for (i = 0; i < driv->num_names; i++) {
+		printk(" %s\n", *name_ptr);
+		name_ptr = (const char **)((char *)name_ptr + driv->offset);
+	}
+
+	if (driv->num_names == 0)
+		printk(" %s\n", driv->driver_name);
+}
+
+static int poll_invalid(comedi_device * dev, comedi_subdevice * s)
+{
+	return -EINVAL;
+}
+
+int insn_inval(comedi_device * dev, comedi_subdevice * s,
+	comedi_insn * insn, lsampl_t * data)
+{
+	return -EINVAL;
+}
+
+static int insn_rw_emulate_bits(comedi_device * dev, comedi_subdevice * s,
+	comedi_insn * insn, lsampl_t * data)
+{
+	comedi_insn new_insn;
+	int ret;
+	static const unsigned channels_per_bitfield = 32;
+
+	unsigned chan = CR_CHAN(insn->chanspec);
+	const unsigned base_bitfield_channel =
+		(chan < channels_per_bitfield) ? 0 : chan;
+	lsampl_t new_data[2];
+	memset(new_data, 0, sizeof(new_data));
+	memset(&new_insn, 0, sizeof(new_insn));
+	new_insn.insn = INSN_BITS;
+	new_insn.chanspec = base_bitfield_channel;
+	new_insn.n = 2;
+	new_insn.data = new_data;
+	new_insn.subdev = insn->subdev;
+
+	if (insn->insn == INSN_WRITE) {
+		if (!(s->subdev_flags & SDF_WRITABLE))
+			return -EINVAL;
+		new_data[0] = 1 << (chan - base_bitfield_channel);	/* mask */
+		new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel)) : 0;	/* bits */
+	}
+
+	ret = s->insn_bits(dev, s, &new_insn, new_data);
+	if (ret < 0)
+		return ret;
+
+	if (insn->insn == INSN_READ) {
+		data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
+	}
+
+	return 1;
+}
+
+static inline unsigned long uvirt_to_kva(pgd_t * pgd, unsigned long adr)
+{
+	unsigned long ret = 0UL;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	pud_t *pud;
+
+	if (!pgd_none(*pgd)) {
+		pud = pud_offset(pgd, adr);
+		pmd = pmd_offset(pud, adr);
+		if (!pmd_none(*pmd)) {
+			ptep = pte_offset_kernel(pmd, adr);
+			pte = *ptep;
+			if (pte_present(pte)) {
+				ret = (unsigned long)
+					page_address(pte_page(pte));
+				ret |= (adr & (PAGE_SIZE - 1));
+			}
+		}
+	}
+	return ret;
+}
+
+static inline unsigned long kvirt_to_kva(unsigned long adr)
+{
+	unsigned long va, kva;
+
+	va = adr;
+	kva = uvirt_to_kva(pgd_offset_k(va), va);
+
+	return kva;
+}
+
+int comedi_buf_alloc(comedi_device * dev, comedi_subdevice * s,
+	unsigned long new_size)
+{
+	comedi_async *async = s->async;
+
+	/* Round up new_size to multiple of PAGE_SIZE */
+	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
+
+	/* if no change is required, do nothing */
+	if (async->prealloc_buf && async->prealloc_bufsz == new_size) {
+		return 0;
+	}
+	/* deallocate the old buffer */
+	if (async->prealloc_buf) {
+		vunmap(async->prealloc_buf);
+		async->prealloc_buf = NULL;
+		async->prealloc_bufsz = 0;
+	}
+	if (async->buf_page_list) {
+		unsigned i;
+		for (i = 0; i < async->n_buf_pages; ++i) {
+			if (async->buf_page_list[i].virt_addr) {
+				mem_map_unreserve(virt_to_page(async->
+						buf_page_list[i].virt_addr));
+				if (s->async_dma_dir != DMA_NONE) {
+					dma_free_coherent(dev->hw_dev,
+						PAGE_SIZE,
+						async->buf_page_list[i].
+						virt_addr,
+						async->buf_page_list[i].
+						dma_addr);
+				} else {
+					free_page((unsigned long)async->
+						buf_page_list[i].virt_addr);
+				}
+			}
+		}
+		vfree(async->buf_page_list);
+		async->buf_page_list = NULL;
+		async->n_buf_pages = 0;
+	}
+	/* allocate the new buffer */
+	if (new_size) {
+		unsigned i = 0;
+		unsigned n_pages = new_size >> PAGE_SHIFT;
+		struct page **pages = NULL;
+
+		async->buf_page_list =
+			vmalloc(sizeof(struct comedi_buf_page) * n_pages);
+		if (async->buf_page_list) {
+			memset(async->buf_page_list, 0,
+				sizeof(struct comedi_buf_page) * n_pages);
+			pages = vmalloc(sizeof(struct page *) * n_pages);
+		}
+		if (pages) {
+			for (i = 0; i < n_pages; i++) {
+				if (s->async_dma_dir != DMA_NONE) {
+					async->buf_page_list[i].virt_addr =
+						dma_alloc_coherent(dev->hw_dev,
+						PAGE_SIZE,
+						&async->buf_page_list[i].
+						dma_addr,
+						GFP_KERNEL | __GFP_COMP);
+				} else {
+					async->buf_page_list[i].virt_addr =
+						(void *)
+						get_zeroed_page(GFP_KERNEL);
+				}
+				if (async->buf_page_list[i].virt_addr == NULL) {
+					break;
+				}
+				mem_map_reserve(virt_to_page(async->
+						buf_page_list[i].virt_addr));
+				pages[i] =
+					virt_to_page(async->buf_page_list[i].
+					virt_addr);
+			}
+		}
+		if (i == n_pages) {
+			async->prealloc_buf =
+				vmap(pages, n_pages, VM_MAP,
+				PAGE_KERNEL_NOCACHE);
+		}
+		if (pages) {
+			vfree(pages);
+		}
+		if (async->prealloc_buf == NULL) {
+			/* Some allocation failed above. */
+			if (async->buf_page_list) {
+				for (i = 0; i < n_pages; i++) {
+					if (async->buf_page_list[i].virt_addr ==
+						NULL) {
+						break;
+					}
+					mem_map_unreserve(virt_to_page(async->
+							buf_page_list[i].
+							virt_addr));
+					if (s->async_dma_dir != DMA_NONE) {
+						dma_free_coherent(dev->hw_dev,
+							PAGE_SIZE,
+							async->buf_page_list[i].
+							virt_addr,
+							async->buf_page_list[i].
+							dma_addr);
+					} else {
+						free_page((unsigned long)async->
+							buf_page_list[i].
+							virt_addr);
+					}
+				}
+				vfree(async->buf_page_list);
+				async->buf_page_list = NULL;
+			}
+			return -ENOMEM;
+		}
+		async->n_buf_pages = n_pages;
+	}
+	async->prealloc_bufsz = new_size;
+
+	return 0;
+}
+
+/* munging is applied to data by the core as it passes between user
+ * and kernel space */
+unsigned int comedi_buf_munge(comedi_async * async, unsigned int num_bytes)
+{
+	comedi_subdevice *s = async->subdevice;
+	unsigned int count = 0;
+	const unsigned num_sample_bytes = bytes_per_sample(s);
+
+	if (s->munge == NULL || (async->cmd.flags & CMDF_RAWDATA)) {
+		async->munge_count += num_bytes;
+		if ((int)(async->munge_count - async->buf_write_count) > 0)
+			BUG();
+		return num_bytes;
+	}
+	/* don't munge partial samples */
+	num_bytes -= num_bytes % num_sample_bytes;
+	while (count < num_bytes) {
+		int block_size;
+
+		block_size = num_bytes - count;
+		if (block_size < 0) {
+			rt_printk("%s: %s: bug! block_size is negative\n",
+				__FILE__, __func__);
+			break;
+		}
+		if ((int)(async->munge_ptr + block_size -
+				async->prealloc_bufsz) > 0)
+			block_size = async->prealloc_bufsz - async->munge_ptr;
+
+		s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
+			block_size, async->munge_chan);
+
+		/* barrier ensures data is munged in the buffer before
+		 * munge_count is incremented */
+		smp_wmb();
+
+		async->munge_chan += block_size / num_sample_bytes;
+		async->munge_chan %= async->cmd.chanlist_len;
+		async->munge_count += block_size;
+		async->munge_ptr += block_size;
+		async->munge_ptr %= async->prealloc_bufsz;
+		count += block_size;
+	}
+	if ((int)(async->munge_count - async->buf_write_count) > 0)
+		BUG();
+	return count;
+}
+
+unsigned int comedi_buf_write_n_available(comedi_async * async)
+{
+	unsigned int free_end;
+	unsigned int nbytes;
+
+	if (async == NULL)
+		return 0;
+
+	free_end = async->buf_read_count + async->prealloc_bufsz;
+	nbytes = free_end - async->buf_write_alloc_count;
+	nbytes -= nbytes % bytes_per_sample(async->subdevice);
+	/* barrier ensures the read of buf_read_count in this
+	   query occurs before any following writes to the buffer which
+	   might be based on the return value from this query.
+	 */
+	smp_mb();
+	return nbytes;
+}
+
+/* allocates chunk for the writer from free buffer space */
+unsigned int comedi_buf_write_alloc(comedi_async * async, unsigned int nbytes)
+{
+	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
+
+	if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0) {
+		nbytes = free_end - async->buf_write_alloc_count;
+	}
+	async->buf_write_alloc_count += nbytes;
+	/* barrier ensures the read of buf_read_count above occurs before
+	   we write data to the write-alloc'ed buffer space */
+	smp_mb();
+	return nbytes;
+}
+
+/* allocates nothing unless it can completely fulfill the request */
+unsigned int comedi_buf_write_alloc_strict(comedi_async * async,
+	unsigned int nbytes)
+{
+	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
+
+	if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0) {
+		nbytes = 0;
+	}
+	async->buf_write_alloc_count += nbytes;
+	/* barrier ensures the read of buf_read_count above occurs before
+	   we write data to the write-alloc'ed buffer space */
+	smp_mb();
+	return nbytes;
+}
+
+/* transfers a chunk from writer to filled buffer space */
+unsigned comedi_buf_write_free(comedi_async * async, unsigned int nbytes)
+{
+	if ((int)(async->buf_write_count + nbytes -
+			async->buf_write_alloc_count) > 0) {
+		rt_printk
+			("comedi: attempted to write-free more bytes than have been write-allocated.\n");
+		nbytes = async->buf_write_alloc_count - async->buf_write_count;
+	}
+	async->buf_write_count += nbytes;
+	async->buf_write_ptr += nbytes;
+	comedi_buf_munge(async, async->buf_write_count - async->munge_count);
+	if (async->buf_write_ptr >= async->prealloc_bufsz) {
+		async->buf_write_ptr %= async->prealloc_bufsz;
+	}
+	return nbytes;
+}
+
+/* allocates a chunk for the reader from filled (and munged) buffer space */
+unsigned comedi_buf_read_alloc(comedi_async * async, unsigned nbytes)
+{
+	if ((int)(async->buf_read_alloc_count + nbytes - async->munge_count) >
+		0) {
+		nbytes = async->munge_count - async->buf_read_alloc_count;
+	}
+	async->buf_read_alloc_count += nbytes;
+	/* barrier ensures the read of munge_count occurs before we actually
+	   read data out of the buffer */
+	smp_rmb();
+	return nbytes;
+}
+
+/* transfers control of a chunk from reader to free buffer space */
+unsigned comedi_buf_read_free(comedi_async * async, unsigned int nbytes)
+{
+	/* barrier ensures data has been read out of the buffer before the
+	 * read count is incremented */
+	smp_mb();
+	if ((int)(async->buf_read_count + nbytes -
+			async->buf_read_alloc_count) > 0) {
+		rt_printk
+			("comedi: attempted to read-free more bytes than have been read-allocated.\n");
+		nbytes = async->buf_read_alloc_count - async->buf_read_count;
+	}
+	async->buf_read_count += nbytes;
+	async->buf_read_ptr += nbytes;
+	async->buf_read_ptr %= async->prealloc_bufsz;
+	return nbytes;
+}
+
+void comedi_buf_memcpy_to(comedi_async * async, unsigned int offset,
+	const void *data, unsigned int num_bytes)
+{
+	unsigned int write_ptr = async->buf_write_ptr + offset;
+
+	if (write_ptr >= async->prealloc_bufsz)
+		write_ptr %= async->prealloc_bufsz;
+
+	while (num_bytes) {
+		unsigned int block_size;
+
+		if (write_ptr + num_bytes > async->prealloc_bufsz)
+			block_size = async->prealloc_bufsz - write_ptr;
+		else
+			block_size = num_bytes;
+
+		memcpy(async->prealloc_buf + write_ptr, data, block_size);
+
+		data += block_size;
+		num_bytes -= block_size;
+
+		write_ptr = 0;
+	}
+}
+
+void comedi_buf_memcpy_from(comedi_async * async, unsigned int offset,
+	void *dest, unsigned int nbytes)
+{
+	void *src;
+	unsigned int read_ptr = async->buf_read_ptr + offset;
+
+	if (read_ptr >= async->prealloc_bufsz)
+		read_ptr %= async->prealloc_bufsz;
+
+	while (nbytes) {
+		unsigned int block_size;
+
+		src = async->prealloc_buf + read_ptr;
+
+		if (nbytes >= async->prealloc_bufsz - read_ptr)
+			block_size = async->prealloc_bufsz - read_ptr;
+		else
+			block_size = nbytes;
+
+		memcpy(dest, src, block_size);
+		nbytes -= block_size;
+		dest += block_size;
+		read_ptr = 0;
+	}
+}
+
+unsigned int comedi_buf_read_n_available(comedi_async * async)
+{
+	unsigned num_bytes;
+
+	if (async == NULL)
+		return 0;
+	num_bytes = async->munge_count - async->buf_read_count;
+	/* barrier ensures the read of munge_count in this
+	   query occurs before any following reads of the buffer which
+	   might be based on the return value from this query.
+	 */
+	smp_rmb();
+	return num_bytes;
+}
+
+int comedi_buf_get(comedi_async * async, sampl_t * x)
+{
+	unsigned int n = comedi_buf_read_n_available(async);
+
+	if (n < sizeof(sampl_t))
+		return 0;
+	comedi_buf_read_alloc(async, sizeof(sampl_t));
+	*x = *(sampl_t *) (async->prealloc_buf + async->buf_read_ptr);
+	comedi_buf_read_free(async, sizeof(sampl_t));
+	return 1;
+}
+
+int comedi_buf_put(comedi_async * async, sampl_t x)
+{
+	unsigned int n = comedi_buf_write_alloc_strict(async, sizeof(sampl_t));
+
+	if (n < sizeof(sampl_t)) {
+		async->events |= COMEDI_CB_ERROR;
+		return 0;
+	}
+	*(sampl_t *) (async->prealloc_buf + async->buf_write_ptr) = x;
+	comedi_buf_write_free(async, sizeof(sampl_t));
+	return 1;
+}
+
+void comedi_reset_async_buf(comedi_async * async)
+{
+	async->buf_write_alloc_count = 0;
+	async->buf_write_count = 0;
+	async->buf_read_alloc_count = 0;
+	async->buf_read_count = 0;
+
+	async->buf_write_ptr = 0;
+	async->buf_read_ptr = 0;
+
+	async->cur_chan = 0;
+	async->scan_progress = 0;
+	async->munge_chan = 0;
+	async->munge_count = 0;
+	async->munge_ptr = 0;
+
+	async->events = 0;
+}
+
+int comedi_auto_config(struct device *hardware_device, const char *board_name,
+	const int *options, unsigned num_options)
+{
+	comedi_devconfig it;
+	int minor;
+	struct comedi_device_file_info *dev_file_info;
+	int retval;
+
+	minor = comedi_alloc_board_minor(hardware_device);
+	if (minor < 0)
+		return minor;
+	dev_set_drvdata(hardware_device, (void *)(unsigned long)minor);
+
+	dev_file_info = comedi_get_device_file_info(minor);
+
+	memset(&it, 0, sizeof(it));
+	strncpy(it.board_name, board_name, COMEDI_NAMELEN);
+	it.board_name[COMEDI_NAMELEN - 1] = '\0';
+	BUG_ON(num_options > COMEDI_NDEVCONFOPTS);
+	memcpy(it.options, options, num_options * sizeof(int));
+
+	mutex_lock(&dev_file_info->device->mutex);
+	retval = comedi_device_attach(dev_file_info->device, &it);
+	mutex_unlock(&dev_file_info->device->mutex);
+	if (retval < 0)
+		comedi_free_board_minor(minor);
+	return retval;
+}
+
+void comedi_auto_unconfig(struct device *hardware_device)
+{
+	unsigned long minor = (unsigned long)dev_get_drvdata(hardware_device);
+
+	BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
+
+	comedi_free_board_minor(minor);
+}
+
+int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name)
+{
+	int options[2];
+
+	/* PCI bus */
+	options[0] = pcidev->bus->number;
+	/* PCI slot */
+	options[1] = PCI_SLOT(pcidev->devfn);
+
+	return comedi_auto_config(&pcidev->dev, board_name, options,
+		sizeof(options) / sizeof(options[0]));
+}
+
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
+{
+	comedi_auto_unconfig(&pcidev->dev);
+}
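
The buffer helpers above follow a two-phase protocol on each side of the
ring: reserve (comedi_buf_write_alloc*/comedi_buf_read_alloc), then commit
(comedi_buf_write_free/comedi_buf_read_free), with the memory barriers
ordering data against the counters and comedi_buf_munge() running at
write-commit time. A hedged sketch of the writer side, as a hypothetical
driver's sample-push path:

static void push_samples(comedi_device *dev, comedi_subdevice *s,
	const sampl_t *data, unsigned int n)
{
	comedi_async *async = s->async;
	unsigned int nbytes = n * sizeof(sampl_t);

	/* phase 1: reserve space; the strict variant is all-or-nothing */
	if (comedi_buf_write_alloc_strict(async, nbytes) < nbytes) {
		async->events |= COMEDI_CB_ERROR;	/* buffer overrun */
	} else {
		/* fill the reserved region, starting at buf_write_ptr */
		comedi_buf_memcpy_to(async, 0, data, nbytes);
		/* phase 2: commit; this also munges the new bytes */
		comedi_buf_write_free(async, nbytes);
	}
	comedi_event(dev, s);	/* deliver events, wake any reader */
}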

Some files were not shown because too many files changed in this diff