
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (116 commits)
  sk98lin: planned removal
  AT91: MACB support
  sky2: version 1.12
  sky2: add new chip ids
  sky2: Yukon Extreme support
  sky2: safer transmit timeout
  sky2: TSO support for EC_U
  sky2: use dev_err for error reports
  sky2: add Wake On Lan support
  fix unaligned exception in /drivers/net/wireless/orinoco.c
  Remove unused kernel config option DLCI_COUNT
  z85230: spinlock logic
  mips: declance: Driver model for the PMAD-A
  Spidernet: Rework RX linked list
  NET: turn local_save_flags() + local_irq_disable() into local_irq_save()
  NET-3c59x: turn local_save_flags() + local_irq_disable() into local_irq_save()
  hp100: convert pci_module_init() to pci_register_driver()
  NetXen: Added ethtool support for user level tools.
  NetXen: Firmware crb init changes.
  maintainers: add atl1 maintainers
  ...
Linus Torvalds
commit 7677ced48e
100 changed files with 23962 additions and 4321 deletions
  1. Documentation/feature-removal-schedule.txt (+7 -0)
  2. MAINTAINERS (+17 -1)
  3. drivers/net/3c59x.c (+1 -2)
  4. drivers/net/Kconfig (+39 -26)
  5. drivers/net/Makefile (+3 -3)
  6. drivers/net/Space.c (+0 -4)
  7. drivers/net/amd8111e.c (+1 -2)
  8. drivers/net/b44.c (+4 -4)
  9. drivers/net/b44.h (+5 -5)
  10. drivers/net/bmac.c (+4 -16)
  11. drivers/net/bnx2.c (+2 -11)
  12. drivers/net/bonding/bond_main.c (+17 -6)
  13. drivers/net/bonding/bond_sysfs.c (+15 -0)
  14. drivers/net/bonding/bonding.h (+5 -4)
  15. drivers/net/chelsio/common.h (+1 -1)
  16. drivers/net/chelsio/cpl5_cmd.h (+9 -9)
  17. drivers/net/chelsio/cxgb2.c (+59 -90)
  18. drivers/net/chelsio/elmer0.h (+20 -20)
  19. drivers/net/chelsio/espi.c (+22 -22)
  20. drivers/net/chelsio/fpga_defs.h (+3 -3)
  21. drivers/net/chelsio/gmac.h (+9 -2)
  22. drivers/net/chelsio/ixf1010.c (+60 -40)
  23. drivers/net/chelsio/mv88e1xxx.c (+13 -14)
  24. drivers/net/chelsio/my3126.c (+8 -8)
  25. drivers/net/chelsio/pm3393.c (+52 -39)
  26. drivers/net/chelsio/sge.c (+155 -173)
  27. drivers/net/chelsio/subr.c (+45 -44)
  28. drivers/net/chelsio/tp.c (+30 -32)
  29. drivers/net/chelsio/vsc7326.c (+72 -67)
  30. drivers/net/chelsio/vsc7326_reg.h (+75 -64)
  31. drivers/net/chelsio/vsc8244.c (+20 -21)
  32. drivers/net/cxgb3/Makefile (+8 -0)
  33. drivers/net/cxgb3/adapter.h (+279 -0)
  34. drivers/net/cxgb3/ael1002.c (+251 -0)
  35. drivers/net/cxgb3/common.h (+729 -0)
  36. drivers/net/cxgb3/cxgb3_ctl_defs.h (+164 -0)
  37. drivers/net/cxgb3/cxgb3_defs.h (+99 -0)
  38. drivers/net/cxgb3/cxgb3_ioctl.h (+185 -0)
  39. drivers/net/cxgb3/cxgb3_main.c (+2515 -0)
  40. drivers/net/cxgb3/cxgb3_offload.c (+1222 -0)
  41. drivers/net/cxgb3/cxgb3_offload.h (+193 -0)
  42. drivers/net/cxgb3/firmware_exports.h (+177 -0)
  43. drivers/net/cxgb3/l2t.c (+450 -0)
  44. drivers/net/cxgb3/l2t.h (+143 -0)
  45. drivers/net/cxgb3/mc5.c (+473 -0)
  46. drivers/net/cxgb3/regs.h (+2195 -0)
  47. drivers/net/cxgb3/sge.c (+2681 -0)
  48. drivers/net/cxgb3/sge_defs.h (+251 -0)
  49. drivers/net/cxgb3/t3_cpl.h (+1444 -0)
  50. drivers/net/cxgb3/t3_hw.c (+3375 -0)
  51. drivers/net/cxgb3/t3cdev.h (+73 -0)
  52. drivers/net/cxgb3/version.h (+39 -0)
  53. drivers/net/cxgb3/vsc8211.c (+228 -0)
  54. drivers/net/cxgb3/xgmac.c (+409 -0)
  55. drivers/net/declance.c (+117 -47)
  56. drivers/net/e1000/e1000.h (+0 -7)
  57. drivers/net/e1000/e1000_ethtool.c (+0 -6)
  58. drivers/net/e1000/e1000_main.c (+47 -81)
  59. drivers/net/e1000/e1000_osdep.h (+1 -3)
  60. drivers/net/e1000/e1000_param.c (+3 -12)
  61. drivers/net/forcedeth.c (+479 -190)
  62. drivers/net/hp100.c (+1 -1)
  63. drivers/net/ixgb/ixgb.h (+0 -2)
  64. drivers/net/ixgb/ixgb_ethtool.c (+0 -6)
  65. drivers/net/ixgb/ixgb_main.c (+0 -4)
  66. drivers/net/macb.c (+23 -2)
  67. drivers/net/macb.h (+7 -1)
  68. drivers/net/mace.c (+2 -14)
  69. drivers/net/macmace.c (+3 -15)
  70. drivers/net/macsonic.c (+1 -5)
  71. drivers/net/myri10ge/myri10ge.c (+0 -10)
  72. drivers/net/netxen/netxen_nic.h (+15 -2)
  73. drivers/net/netxen/netxen_nic_ethtool.c (+81 -15)
  74. drivers/net/netxen/netxen_nic_init.c (+274 -5)
  75. drivers/net/oaknet.c (+0 -666)
  76. drivers/net/pasemi_mac.c (+1019 -0)
  77. drivers/net/pasemi_mac.h (+460 -0)
  78. drivers/net/qla3xxx.c (+301 -62)
  79. drivers/net/qla3xxx.h (+78 -10)
  80. drivers/net/s2io-regs.h (+4 -3)
  81. drivers/net/s2io.c (+200 -277)
  82. drivers/net/s2io.h (+115 -108)
  83. drivers/net/sc92031.c (+1620 -0)
  84. drivers/net/sk_mca.c (+0 -1216)
  85. drivers/net/sk_mca.h (+0 -170)
  86. drivers/net/skfp/can.c (+0 -83)
  87. drivers/net/skfp/drvfbi.c (+10 -14)
  88. drivers/net/skfp/fplustm.c (+2 -2)
  89. drivers/net/skfp/smt.c (+4 -6)
  90. drivers/net/skge.c (+159 -76)
  91. drivers/net/skge.h (+2 -0)
  92. drivers/net/sky2.c (+336 -207)
  93. drivers/net/sky2.h (+60 -25)
  94. drivers/net/spider_net.c (+150 -163)
  95. drivers/net/spider_net.h (+6 -14)
  96. drivers/net/spider_net_ethtool.c (+2 -2)
  97. drivers/net/tg3.c (+0 -32)
  98. drivers/net/ucc_geth.c (+10 -13)
  99. drivers/net/wan/Kconfig (+13 -11)
  100. drivers/net/wan/Makefile (+1 -0)

+ 7 - 0
Documentation/feature-removal-schedule.txt

@@ -333,3 +333,10 @@ Why:	Unmaintained for years, superceded by JFFS2 for years.
 Who:	Jeff Garzik <jeff@garzik.org>
 
 ---------------------------
+
+What:   sk98lin network driver
+When:   July 2007
+Why:    In kernel tree version of driver is unmaintained. Sk98lin driver
+	replaced by the skge driver. 
+Who:    Stephen Hemminger <shemminger@osdl.org>
+

+ 17 - 1
MAINTAINERS

@@ -598,6 +598,16 @@ M:	ecashin@coraid.com
 W:	http://www.coraid.com/support/linux
 S:	Supported
 
+ATL1 ETHERNET DRIVER
+P:	Jay Cliburn
+M:	jcliburn@gmail.com
+P:	Chris Snook
+M:	csnook@redhat.com
+L:	atl1-devel@lists.sourceforge.net
+W:	http://sourceforge.net/projects/atl1
+W:	http://atl1.sourceforge.net
+S:	Maintained
+
 ATM
 P:	Chas Williams
 M:	chas@cmf.nrl.navy.mil
@@ -2485,6 +2495,12 @@ L:	orinoco-devel@lists.sourceforge.net
 W:	http://www.nongnu.org/orinoco/
 S:	Maintained
 
+PA SEMI ETHERNET DRIVER
+P:	Olof Johansson
+M:	olof@lixom.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PARALLEL PORT SUPPORT
 P:	Phil Blundell
 M:	philb@gnu.org
@@ -2654,7 +2670,7 @@ S:	Supported
 
 PRISM54 WIRELESS DRIVER
 P:	Prism54 Development Team
-M:	prism54-private@prism54.org
+M:	developers@islsm.org
 L:	netdev@vger.kernel.org
 W:	http://prism54.org
 S:	Maintained

+ 1 - 2
drivers/net/3c59x.c

@@ -792,8 +792,7 @@ static void poll_vortex(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
 	local_irq_restore(flags);
 }

+ 39 - 26
drivers/net/Kconfig

@@ -190,7 +190,7 @@ config MII
 
 config MACB
 	tristate "Atmel MACB support"
-	depends on NET_ETHERNET && AVR32
+	depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263)
 	select MII
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -235,16 +235,6 @@ config BMAC
 	  To compile this driver as a module, choose M here: the module
 	  will be called bmac.
 
-config OAKNET
-	tristate "National DP83902AV (Oak ethernet) support"
-	depends on NET_ETHERNET && PPC && BROKEN
-	select CRC32
-	help
-	  Say Y if your machine has this type of Ethernet network card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called oaknet.
-
 config ARIADNE
 	tristate "Ariadne support"
 	depends on NET_ETHERNET && ZORRO
@@ -1155,21 +1145,6 @@ config SEEQ8005
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called seeq8005.
 
-config SKMC
-	tristate "SKnet MCA support"
-	depends on NET_ETHERNET && MCA && BROKEN
-	---help---
-	  These are Micro Channel Ethernet adapters. You need to say Y to "MCA
-	  support" in order to use this driver.  Supported cards are the SKnet
-	  Junior MC2 and the SKnet MC2(+).  The driver automatically
-	  distinguishes between the two cards. Note that using multiple boards
-	  of different type hasn't been tested with this driver.  Say Y if you
-	  have one of these Ethernet adapters.
-
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module
-	  will be called sk_mca.
-
 config NE2_MCA
 	tristate "NE/2 (ne2000 MCA version) support"
 	depends on NET_ETHERNET && MCA_LEGACY
@@ -1788,6 +1763,18 @@ config LAN_SAA9730
 	  workstations.
 	  See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
 
+config SC92031
+	tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
+	depends on NET_PCI && PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This is a driver for the Fast Ethernet PCI network cards based on
+	  the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
+	  have one of these, say Y here.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sc92031.  This is recommended.
+
 config NET_POCKET
 	bool "Pocket and portable adapters"
 	depends on NET_ETHERNET && PARPORT
@@ -2392,6 +2379,24 @@ config CHELSIO_T1_NAPI
 	  NAPI is a driver API designed to reduce CPU and interrupt load
 	  when the driver is receiving lots of packets from the card.
 
+config CHELSIO_T3
+        tristate "Chelsio Communications T3 10Gb Ethernet support"
+        depends on PCI
+        help
+          This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+          adapters.
+
+          For general information about Chelsio and our products, visit
+          our website at <http://www.chelsio.com>.
+
+          For customer support, please visit our customer support page at
+          <http://www.chelsio.com/support.htm>.
+
+          Please send feedback to <linux-bugs@chelsio.com>.
+
+          To compile this driver as a module, choose M here: the module
+          will be called cxgb3.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
@@ -2488,6 +2493,13 @@ config NETXEN_NIC
 	help
 	  This enables the support for NetXen's Gigabit Ethernet card.
 
+config PASEMI_MAC
+	tristate "PA Semi 1/10Gbit MAC"
+	depends on PPC64 && PCI
+	help
+	  This driver supports the on-chip 1/10Gbit Ethernet controller on
+	  PA Semi's PWRficient line of chips.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
@@ -2541,6 +2553,7 @@ config DEFXX
 config SKFP
 	tristate "SysKonnect FDDI PCI support"
 	depends on FDDI && PCI
+	select BITREVERSE
 	---help---
 	  Say Y here if you have a SysKonnect FDDI PCI adapter.
 	  The following adapters are supported by this driver:

+ 3 - 3
drivers/net/Makefile

@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
+obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
@@ -36,8 +37,6 @@ obj-$(CONFIG_CASSINI) += cassini.o
 obj-$(CONFIG_MACE) += mace.o
 obj-$(CONFIG_BMAC) += bmac.o
 
-obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
-
 obj-$(CONFIG_DGRS) += dgrs.o
 obj-$(CONFIG_VORTEX) += 3c59x.o
 obj-$(CONFIG_TYPHOON) += typhoon.o
@@ -137,7 +136,6 @@ obj-$(CONFIG_AT1700) += at1700.o
 obj-$(CONFIG_EL1) += 3c501.o
 obj-$(CONFIG_EL16) += 3c507.o
 obj-$(CONFIG_ELMC) += 3c523.o
-obj-$(CONFIG_SKMC) += sk_mca.o
 obj-$(CONFIG_IBMLANA) += ibmlana.o
 obj-$(CONFIG_ELMC_II) += 3c527.o
 obj-$(CONFIG_EL3) += 3c509.o
@@ -160,6 +158,7 @@ obj-$(CONFIG_APRICOT) += 82596.o
 obj-$(CONFIG_LASI_82596) += lasi_82596.o
 obj-$(CONFIG_MVME16x_NET) += 82596.o
 obj-$(CONFIG_BVME6000_NET) += 82596.o
+obj-$(CONFIG_SC92031) += sc92031.o
 
 # This is also a 82596 and should probably be merged
 obj-$(CONFIG_LP486E) += lp486e.o
@@ -196,6 +195,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
 
 obj-$(CONFIG_MACB) += macb.o
 

+ 0 - 4
drivers/net/Space.c

@@ -59,7 +59,6 @@ extern struct net_device *wavelan_probe(int unit);
 extern struct net_device *arlan_probe(int unit);
 extern struct net_device *el16_probe(int unit);
 extern struct net_device *elmc_probe(int unit);
-extern struct net_device *skmca_probe(int unit);
 extern struct net_device *elplus_probe(int unit);
 extern struct net_device *ac3200_probe(int unit);
 extern struct net_device *es_probe(int unit);
@@ -152,9 +151,6 @@ static struct devprobe2 mca_probes[] __initdata = {
 #endif
 #ifdef CONFIG_ELMC_II		/* 3c527 */
 	{mc32_probe, 0},
-#endif
-#ifdef CONFIG_SKMC              /* SKnet Microchannel */
-        {skmca_probe, 0},
 #endif
 	{NULL, 0},
 };

+ 1 - 2
drivers/net/amd8111e.c

@@ -1334,8 +1334,7 @@ err_no_interrupt:
 static void amd8111e_poll(struct net_device *dev)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	amd8111e_interrupt(0, dev);
 	local_irq_restore(flags);
 }

+ 4 - 4
drivers/net/b44.c

@@ -721,7 +721,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	struct ring_info *src_map, *dest_map;
 	struct rx_header *rh;
 	int dest_idx;
-	u32 ctrl;
+	__le32 ctrl;
 
 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 	dest_desc = &bp->rx_ring[dest_idx];
@@ -783,7 +783,7 @@ static int b44_rx(struct b44 *bp, int budget)
 					    RX_PKT_BUF_SZ,
 					    PCI_DMA_FROMDEVICE);
 		rh = (struct rx_header *) skb->data;
-		len = cpu_to_le16(rh->len);
+		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
 		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 		drop_it:
@@ -799,7 +799,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			do {
 				udelay(2);
 				barrier();
-				len = cpu_to_le16(rh->len);
+				len = le16_to_cpu(rh->len);
 			} while (len == 0 && i++ < 5);
 			if (len == 0)
 				goto drop_it;
@@ -2061,7 +2061,7 @@ out:
 static int b44_read_eeprom(struct b44 *bp, u8 *data)
 {
 	long i;
-	u16 *ptr = (u16 *) data;
+	__le16 *ptr = (__le16 *) data;
 
 	for (i = 0; i < 128; i += 2)
 		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));

+ 5 - 5
drivers/net/b44.h

@@ -308,8 +308,8 @@
 #define  MII_TLEDCTRL_ENABLE	0x0040
 
 struct dma_desc {
-	u32	ctrl;
-	u32	addr;
+	__le32	ctrl;
+	__le32	addr;
 };
 
 /* There are only 12 bits in the DMA engine for descriptor offsetting
@@ -327,9 +327,9 @@ struct dma_desc {
 #define RX_COPY_THRESHOLD  	256
 
 struct rx_header {
-	u16	len;
-	u16	flags;
-	u16	pad[12];
+	__le16	len;
+	__le16	flags;
+	__le16	pad[12];
 };
 #define RX_HEADER_LEN	28
 

+ 4 - 16
drivers/net/bmac.c

@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
+#include <linux/bitrev.h>
 #include <asm/prom.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
@@ -140,7 +141,6 @@ static unsigned char *bmac_emergency_rxbuf;
 	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
 	+ sizeof(struct sk_buff_head))
 
-static unsigned char bitrev(unsigned char b);
 static int bmac_open(struct net_device *dev);
 static int bmac_close(struct net_device *dev);
 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
@@ -586,18 +586,6 @@ bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
 		     virt_to_bus(addr), 0);
 }
 
-/* Bit-reverse one byte of an ethernet hardware address. */
-static unsigned char
-bitrev(unsigned char b)
-{
-	int d = 0, i;
-
-	for (i = 0; i < 8; ++i, b >>= 1)
-		d = (d << 1) | (b & 1);
-	return d;
-}
-
-
 static void
 bmac_init_tx_ring(struct bmac_data *bp)
 {
@@ -1224,8 +1212,8 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
 		{
 			reset_and_select_srom(dev);
 			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
-			ea[2*i]   = bitrev(data & 0x0ff);
-			ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
+			ea[2*i]   = bitrev8(data & 0x0ff);
+			ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
 		}
 }
 
@@ -1315,7 +1303,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
 
 	rev = addr[0] == 0 && addr[1] == 0xA0;
 	for (j = 0; j < 6; ++j)
-		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
+		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
 
 	/* Enable chip without interrupts for now */
 	bmac_enable_and_reset_chip(dev);

+ 2 - 11
drivers/net/bnx2.c

@@ -39,12 +39,9 @@
 #include <linux/if_vlan.h>
 #define BCM_VLAN 1
 #endif
-#ifdef NETIF_F_TSO
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
-#define BCM_TSO 1
-#endif
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
 #include <linux/prefetch.h>
@@ -1728,7 +1725,7 @@ bnx2_tx_int(struct bnx2 *bp)
 
 		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
-#ifdef BCM_TSO
+
 		/* partial BD completions possible with TSO packets */
 		if (skb_is_gso(skb)) {
 			u16 last_idx, last_ring_idx;
@@ -1744,7 +1741,7 @@ bnx2_tx_int(struct bnx2 *bp)
 				break;
 			}
 		}
-#endif
+
 		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 			skb_headlen(skb), PCI_DMA_TODEVICE);
 
@@ -4514,7 +4511,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tag_flags |=
 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
 	}
-#ifdef BCM_TSO
 	if ((mss = skb_shinfo(skb)->gso_size) &&
 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
 		u32 tcp_opt_len, ip_tcp_len;
@@ -4547,7 +4543,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 	else
-#endif
 	{
 		mss = 0;
 	}
@@ -5544,10 +5539,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
 	.set_tx_csum		= ethtool_op_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
-#ifdef BCM_TSO
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= bnx2_set_tso,
-#endif
 	.self_test_count	= bnx2_self_test_count,
 	.self_test		= bnx2_self_test,
 	.get_strings		= bnx2_get_strings,
@@ -6104,9 +6097,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef BCM_VLAN
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
-#ifdef BCM_TSO
 	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
-#endif
 
 	netif_carrier_off(bp->dev);
 

+ 17 - 6
drivers/net/bonding/bond_main.c

@@ -4704,6 +4704,7 @@ static int bond_check_params(struct bond_params *params)
 static struct lock_class_key bonding_netdev_xmit_lock_key;
 
 /* Create a new bond based on the specified name and bonding parameters.
+ * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
  * set up our sysfs entries.
  */
@@ -4713,7 +4714,8 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 	int res;
 
 	rtnl_lock();
-	bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup);
+	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
+				ether_setup);
 	if (!bond_dev) {
 		printk(KERN_ERR DRV_NAME
 		       ": %s: eek! can't alloc netdev!\n",
@@ -4722,6 +4724,12 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 		goto out_rtnl;
 	}
 
+	if (!name) {
+		res = dev_alloc_name(bond_dev, "bond%d");
+		if (res < 0)
+			goto out_netdev;
+	}
+
 	/* bond_init() must be called after dev_alloc_name() (for the
 	 * /proc files), but before register_netdevice(), because we
 	 * need to set function pointers.
@@ -4748,14 +4756,19 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
 
 	rtnl_unlock(); /* allows sysfs registration of net device */
 	res = bond_create_sysfs_entry(bond_dev->priv);
-	goto done;
+	if (res < 0) {
+		rtnl_lock();
+		goto out_bond;
+	}
+
+	return 0;
+
 out_bond:
 	bond_deinit(bond_dev);
 out_netdev:
 	free_netdev(bond_dev);
 out_rtnl:
 	rtnl_unlock();
-done:
 	return res;
 }
 
@@ -4763,7 +4776,6 @@ static int __init bonding_init(void)
 {
 	int i;
 	int res;
-	char new_bond_name[8];  /* Enough room for 999 bonds at init. */
 
 	printk(KERN_INFO "%s", version);
 
@@ -4776,8 +4788,7 @@ static int __init bonding_init(void)
 	bond_create_proc_dir();
 #endif
 	for (i = 0; i < max_bonds; i++) {
-		sprintf(new_bond_name, "bond%d",i);
-		res = bond_create(new_bond_name,&bonding_defaults, NULL);
+		res = bond_create(NULL, &bonding_defaults, NULL);
 		if (res)
 			goto err;
 	}

+ 15 - 0
drivers/net/bonding/bond_sysfs.c

@@ -1372,6 +1372,21 @@ int bond_create_sysfs(void)
 		return -ENODEV;
 
 	ret = class_create_file(netdev_class, &class_attr_bonding_masters);
+	/*
+	 * Permit multiple loads of the module by ignoring failures to
+	 * create the bonding_masters sysfs file.  Bonding devices
+	 * created by second or subsequent loads of the module will
+	 * not be listed in, or controllable by, bonding_masters, but
+	 * will have the usual "bonding" sysfs directory.
+	 *
+	 * This is done to preserve backwards compatibility for
+	 * initscripts/sysconfig, which load bonding multiple times to
+	 * configure multiple bonding devices.
+	 */
+	if (ret == -EEXIST) {
+		netdev_class = NULL;
+		return 0;
+	}
 
 	return ret;
 

+ 5 - 4
drivers/net/bonding/bonding.h

@@ -22,8 +22,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"3.1.1"
-#define DRV_RELDATE	"September 26, 2006"
+#define DRV_VERSION	"3.1.2"
+#define DRV_RELDATE	"January 20, 2007"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
@@ -237,12 +237,13 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 #define BOND_ARP_VALIDATE_ALL		(BOND_ARP_VALIDATE_ACTIVE | \
 					 BOND_ARP_VALIDATE_BACKUP)
 
-extern inline int slave_do_arp_validate(struct bonding *bond, struct slave *slave)
+static inline int slave_do_arp_validate(struct bonding *bond,
+					struct slave *slave)
 {
 	return bond->params.arp_validate & (1 << slave->state);
 }
 
-extern inline unsigned long slave_last_rx(struct bonding *bond,
+static inline unsigned long slave_last_rx(struct bonding *bond,
 					struct slave *slave)
 {
 	if (slave_do_arp_validate(bond, slave))

+ 1 - 1
drivers/net/chelsio/common.h

@@ -324,7 +324,7 @@ struct board_info {
 	unsigned char           mdio_phybaseaddr;
 	struct gmac            *gmac;
 	struct gphy            *gphy;
-	struct mdio_ops	       *mdio_ops;
+	struct mdio_ops        *mdio_ops;
 	const char             *desc;
 };
 

+ 9 - 9
drivers/net/chelsio/cpl5_cmd.h

@@ -103,7 +103,7 @@ enum CPL_opcode {
 	CPL_MIGRATE_C2T_RPL   = 0xDD,
 	CPL_ERROR             = 0xD7,
 
-    /* internal: driver -> TOM */
+	/* internal: driver -> TOM */
 	CPL_MSS_CHANGE        = 0xE1
 };
 
@@ -159,8 +159,8 @@ enum {                // TX_PKT_LSO ethernet types
 };
 
 union opcode_tid {
-    u32 opcode_tid;
-    u8 opcode;
+	u32 opcode_tid;
+	u8 opcode;
 };
 
 #define S_OPCODE 24
@@ -234,7 +234,7 @@ struct cpl_pass_accept_req {
 	u32 local_ip;
 	u32 peer_ip;
 	u32 tos_tid;
-    struct tcp_options tcp_options;
+	struct tcp_options tcp_options;
 	u8  dst_mac[6];
 	u16 vlan_tag;
 	u8  src_mac[6];
@@ -250,12 +250,12 @@ struct cpl_pass_accept_rpl {
 	u32 peer_ip;
 	u32 opt0h;
 	union {
-	u32 opt0l;
-	struct {
-	    u8 rsvd[3];
-	    u8 status;
+		u32 opt0l;
+		struct {
+		    u8 rsvd[3];
+		    u8 status;
+		};
 	};
-    };
 };
 
 struct cpl_act_open_req {

+ 59 - 90
drivers/net/chelsio/cxgb2.c

@@ -69,14 +69,14 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
 	cancel_delayed_work(&ap->stats_update_task);
 }
 
-#define MAX_CMDQ_ENTRIES 16384
-#define MAX_CMDQ1_ENTRIES 1024
-#define MAX_RX_BUFFERS 16384
-#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_CMDQ_ENTRIES	16384
+#define MAX_CMDQ1_ENTRIES	1024
+#define MAX_RX_BUFFERS		16384
+#define MAX_RX_JUMBO_BUFFERS	16384
 #define MAX_TX_BUFFERS_HIGH	16384U
 #define MAX_TX_BUFFERS_LOW	1536U
 #define MAX_TX_BUFFERS		1460U
-#define MIN_FL_ENTRIES 32
+#define MIN_FL_ENTRIES		32
 
 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
@@ -143,7 +143,7 @@ static void link_report(struct port_info *p)
 			case SPEED_100:   s = "100Mbps"; break;
 		}
 
-	printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 		       p->dev->name, s,
 		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 	}
@@ -233,7 +233,7 @@ static int cxgb_up(struct adapter *adapter)
 
 	t1_sge_start(adapter->sge);
 	t1_interrupts_enable(adapter);
- out_err:
+out_err:
 	return err;
 }
 
@@ -454,51 +454,21 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	const struct cmac_statistics *s;
 	const struct sge_intr_counts *t;
 	struct sge_port_stats ss;
+	unsigned int len;
 
 	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
 
-	*data++ = s->TxOctetsOK;
-	*data++ = s->TxOctetsBad;
-	*data++ = s->TxUnicastFramesOK;
-	*data++ = s->TxMulticastFramesOK;
-	*data++ = s->TxBroadcastFramesOK;
-	*data++ = s->TxPauseFrames;
-	*data++ = s->TxFramesWithDeferredXmissions;
-	*data++ = s->TxLateCollisions;
-	*data++ = s->TxTotalCollisions;
-	*data++ = s->TxFramesAbortedDueToXSCollisions;
-	*data++ = s->TxUnderrun;
-	*data++ = s->TxLengthErrors;
-	*data++ = s->TxInternalMACXmitError;
-	*data++ = s->TxFramesWithExcessiveDeferral;
-	*data++ = s->TxFCSErrors;
-
-	*data++ = s->RxOctetsOK;
-	*data++ = s->RxOctetsBad;
-	*data++ = s->RxUnicastFramesOK;
-	*data++ = s->RxMulticastFramesOK;
-	*data++ = s->RxBroadcastFramesOK;
-	*data++ = s->RxPauseFrames;
-	*data++ = s->RxFCSErrors;
-	*data++ = s->RxAlignErrors;
-	*data++ = s->RxSymbolErrors;
-	*data++ = s->RxDataErrors;
-	*data++ = s->RxSequenceErrors;
-	*data++ = s->RxRuntErrors;
-	*data++ = s->RxJabberErrors;
-	*data++ = s->RxInternalMACRcvError;
-	*data++ = s->RxInRangeLengthErrors;
-	*data++ = s->RxOutOfRangeLengthField;
-	*data++ = s->RxFrameTooLongErrors;
+	len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
+	memcpy(data, &s->TxOctetsOK, len);
+	data += len;
+
+	len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
+	memcpy(data, &s->RxOctetsOK, len);
+	data += len;
 
 	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
-	*data++ = ss.rx_packets;
-	*data++ = ss.rx_cso_good;
-	*data++ = ss.tx_packets;
-	*data++ = ss.tx_cso;
-	*data++ = ss.tx_tso;
-	*data++ = ss.vlan_xtract;
-	*data++ = ss.vlan_insert;
+	memcpy(data, &ss, sizeof(ss));
+	data += sizeof(ss);
 
 	t = t1_sge_get_intr_counts(adapter->sge);
 	*data++ = t->rx_drops;
@@ -749,7 +719,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 		return -EINVAL;
 
 	if (adapter->flags & FULL_INIT_DONE)
-	return -EBUSY;
+		return -EBUSY;
 
 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -764,7 +734,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 	struct adapter *adapter = dev->priv;
 
 	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
- 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 	return 0;
@@ -782,9 +752,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 
 static int get_eeprom_len(struct net_device *dev)
 {
- 	struct adapter *adapter = dev->priv;
+	struct adapter *adapter = dev->priv;
 
- 	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
+	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 }
 
 #define EEPROM_MAGIC(ap) \
@@ -848,7 +818,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 		u32 val;
 
 		if (!phy->mdio_read)
-	    return -EOPNOTSUPP;
+			return -EOPNOTSUPP;
 		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			       &val);
 		data->val_out = val;
@@ -860,7 +830,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 		    return -EPERM;
 		if (!phy->mdio_write)
-	    return -EOPNOTSUPP;
+			return -EOPNOTSUPP;
 		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			        data->val_in);
 		break;
@@ -879,9 +849,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 
 	if (!mac->ops->set_mtu)
-	return -EOPNOTSUPP;
+		return -EOPNOTSUPP;
 	if (new_mtu < 68)
-	return -EINVAL;
+		return -EINVAL;
 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -1211,9 +1181,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	return 0;
 
- out_release_adapter_res:
+out_release_adapter_res:
 	t1_free_sw_modules(adapter);
- out_free_dev:
+out_free_dev:
 	if (adapter) {
 		if (adapter->regs)
 			iounmap(adapter->regs);
@@ -1222,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 				free_netdev(adapter->port[i].dev);
 	}
 	pci_release_regions(pdev);
- out_disable_pdev:
+out_disable_pdev:
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
@@ -1273,28 +1243,27 @@ static int t1_clock(struct adapter *adapter, int mode)
 	int M_MEM_VAL;
 
 	enum {
-		M_CORE_BITS = 9,
-		T_CORE_VAL = 0,
-		T_CORE_BITS = 2,
-		N_CORE_VAL = 0,
-		N_CORE_BITS = 2,
-		M_MEM_BITS = 9,
-		T_MEM_VAL = 0,
-		T_MEM_BITS = 2,
-		N_MEM_VAL = 0,
-		N_MEM_BITS = 2,
-		NP_LOAD = 1 << 17,
-		S_LOAD_MEM = 1 << 5,
-		S_LOAD_CORE = 1 << 6,
-		S_CLOCK = 1 << 3
+		M_CORE_BITS	= 9,
+		T_CORE_VAL	= 0,
+		T_CORE_BITS	= 2,
+		N_CORE_VAL	= 0,
+		N_CORE_BITS	= 2,
+		M_MEM_BITS	= 9,
+		T_MEM_VAL	= 0,
+		T_MEM_BITS	= 2,
+		N_MEM_VAL	= 0,
+		N_MEM_BITS	= 2,
+		NP_LOAD		= 1 << 17,
+		S_LOAD_MEM	= 1 << 5,
+		S_LOAD_CORE	= 1 << 6,
+		S_CLOCK		= 1 << 3
 	};
 
 	if (!t1_is_T1B(adapter))
 		return -ENODEV;	/* Can't re-clock this chip. */
 
-	if (mode & 2) {
+	if (mode & 2)
 		return 0;	/* show current mode. */
-	}
 
 	if ((adapter->t1powersave & 1) == (mode & 1))
 		return -EALREADY;	/* ASIC already running in mode. */
@@ -1386,26 +1355,26 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
 static void __devexit remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct adapter *adapter = dev->priv;
+	int i;
 
-	if (dev) {
-		int i;
-		struct adapter *adapter = dev->priv;
-
-		for_each_port(adapter, i)
-			if (test_bit(i, &adapter->registered_device_map))
-				unregister_netdev(adapter->port[i].dev);
+	for_each_port(adapter, i) {
+		if (test_bit(i, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[i].dev);
+	}
 
-		t1_free_sw_modules(adapter);
-		iounmap(adapter->regs);
-		while (--i >= 0)
-			if (adapter->port[i].dev)
-				free_netdev(adapter->port[i].dev);
+	t1_free_sw_modules(adapter);
+	iounmap(adapter->regs);
 
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
-		t1_sw_reset(pdev);
+	while (--i >= 0) {
+		if (adapter->port[i].dev)
+			free_netdev(adapter->port[i].dev);
 	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	t1_sw_reset(pdev);
 }
 
 static struct pci_driver driver = {

+ 20 - 20
drivers/net/chelsio/elmer0.h

@@ -46,14 +46,14 @@ enum {
 };
 
 /* ELMER0 registers */
-#define A_ELMER0_VERSION 0x100000
-#define A_ELMER0_PHY_CFG 0x100004
-#define A_ELMER0_INT_ENABLE 0x100008
-#define A_ELMER0_INT_CAUSE 0x10000c
-#define A_ELMER0_GPI_CFG 0x100010
-#define A_ELMER0_GPI_STAT 0x100014
-#define A_ELMER0_GPO 0x100018
-#define A_ELMER0_PORT0_MI1_CFG 0x400000
+#define A_ELMER0_VERSION	0x100000
+#define A_ELMER0_PHY_CFG	0x100004
+#define A_ELMER0_INT_ENABLE	0x100008
+#define A_ELMER0_INT_CAUSE	0x10000c
+#define A_ELMER0_GPI_CFG	0x100010
+#define A_ELMER0_GPI_STAT	0x100014
+#define A_ELMER0_GPO		0x100018
+#define A_ELMER0_PORT0_MI1_CFG	0x400000
 
 #define S_MI1_MDI_ENABLE    0
 #define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
@@ -111,18 +111,18 @@ enum {
 #define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
 #define F_MI1_OP_BUSY    V_MI1_OP_BUSY(1U)
 
-#define A_ELMER0_PORT1_MI1_CFG 0x500000
-#define A_ELMER0_PORT1_MI1_ADDR 0x500004
-#define A_ELMER0_PORT1_MI1_DATA 0x500008
-#define A_ELMER0_PORT1_MI1_OP 0x50000c
-#define A_ELMER0_PORT2_MI1_CFG 0x600000
-#define A_ELMER0_PORT2_MI1_ADDR 0x600004
-#define A_ELMER0_PORT2_MI1_DATA 0x600008
-#define A_ELMER0_PORT2_MI1_OP 0x60000c
-#define A_ELMER0_PORT3_MI1_CFG 0x700000
-#define A_ELMER0_PORT3_MI1_ADDR 0x700004
-#define A_ELMER0_PORT3_MI1_DATA 0x700008
-#define A_ELMER0_PORT3_MI1_OP 0x70000c
+#define A_ELMER0_PORT1_MI1_CFG	0x500000
+#define A_ELMER0_PORT1_MI1_ADDR	0x500004
+#define A_ELMER0_PORT1_MI1_DATA	0x500008
+#define A_ELMER0_PORT1_MI1_OP	0x50000c
+#define A_ELMER0_PORT2_MI1_CFG	0x600000
+#define A_ELMER0_PORT2_MI1_ADDR	0x600004
+#define A_ELMER0_PORT2_MI1_DATA	0x600008
+#define A_ELMER0_PORT2_MI1_OP	0x60000c
+#define A_ELMER0_PORT3_MI1_CFG	0x700000
+#define A_ELMER0_PORT3_MI1_ADDR	0x700004
+#define A_ELMER0_PORT3_MI1_DATA	0x700008
+#define A_ELMER0_PORT3_MI1_OP	0x70000c
 
 /* Simple bit definition for GPI and GP0 registers. */
 #define     ELMER0_GP_BIT0              0x0001

+ 22 - 22
drivers/net/chelsio/espi.c

@@ -202,9 +202,9 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
 
 static void espi_setup_for_vsc7321(adapter_t *adapter)
 {
-        writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
-        writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
-        writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
 	writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
 	writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
 	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
@@ -247,10 +247,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
 		writel(V_OUT_OF_SYNC_COUNT(4) |
 		       V_DIP2_PARITY_ERR_THRES(3) |
 		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
-        	writel(nports == 4 ? 0x200040 : 0x1000080,
+		writel(nports == 4 ? 0x200040 : 0x1000080,
 		       adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
 	} else
-        	writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+		writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
 
 	if (mac_type == CHBT_MAC_PM3393)
 		espi_setup_for_pm3393(adapter);
@@ -301,7 +301,8 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
 {
 	struct peespi *espi = adapter->espi;
 
-	if (!is_T2(adapter)) return;
+	if (!is_T2(adapter))
+		return;
 	spin_lock(&espi->lock);
 	espi->misc_ctrl = (val & ~MON_MASK) |
 			  (espi->misc_ctrl & MON_MASK);
@@ -340,32 +341,31 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
  * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
  * one shot, since there is no per port counter on the out side.
  */
-int
-t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
 {
-        struct peespi *espi = adapter->espi;
+	struct peespi *espi = adapter->espi;
 	u8 i, nport = (u8)adapter->params.nports;
 
-        if (!wait) {
-                if (!spin_trylock(&espi->lock))
-                        return -1;
-        } else
-                spin_lock(&espi->lock);
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return -1;
+	} else
+		spin_lock(&espi->lock);
 
-	if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) {
+	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
 		espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
 					F_MONITORED_DIRECTION;
-                writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
- 	}
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	}
 	for (i = 0 ; i < nport; i++, valp++) {
 		if (i) {
 			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
 			       adapter->regs + A_ESPI_MISC_CONTROL);
 		}
-                *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
-        }
+		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	}
 
-        writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
-        spin_unlock(&espi->lock);
-        return 0;
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+	return 0;
 }

+ 3 - 3
drivers/net/chelsio/fpga_defs.h

@@ -98,9 +98,9 @@
 #define A_MI0_DATA_INT 0xb10
 
 /* GMAC registers */
-#define A_GMAC_MACID_LO 0x28
-#define A_GMAC_MACID_HI 0x2c
-#define A_GMAC_CSR 0x30
+#define A_GMAC_MACID_LO	0x28
+#define A_GMAC_MACID_HI	0x2c
+#define A_GMAC_CSR	0x30
 
 #define S_INTERFACE    0
 #define M_INTERFACE    0x3

+ 9 - 2
drivers/net/chelsio/gmac.h

@@ -42,8 +42,15 @@
 
 #include "common.h"
 
-enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
-enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
+enum {
+	MAC_STATS_UPDATE_FAST,
+	MAC_STATS_UPDATE_FULL
+};
+
+enum {
+	MAC_DIRECTION_RX = 1,
+	MAC_DIRECTION_TX = 2
+};
 
 struct cmac_statistics {
 	/* Transmit */

+ 60 - 40
drivers/net/chelsio/ixf1010.c

@@ -145,48 +145,61 @@ static void disable_port(struct cmac *mac)
 	t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
 }
 
-#define RMON_UPDATE(mac, name, stat_name) \
-	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
-	(mac)->stats.stat_name += val;
-
 /*
  * Read the current values of the RMON counters and add them to the cumulative
  * port statistics.  The HW RMON counters are cleared by this operation.
  */
 static void port_stats_update(struct cmac *mac)
 {
-	u32 val;
+	static struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats[] = {
+
+#define HW_STAT(name, stat_name) \
+	{ REG_##name, \
+	  (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxOctetsTotalOK, RxOctetsOK),
+		HW_STAT(RxOctetsBad, RxOctetsBad),
+		HW_STAT(RxUCPkts, RxUnicastFramesOK),
+		HW_STAT(RxMCPkts, RxMulticastFramesOK),
+		HW_STAT(RxBCPkts, RxBroadcastFramesOK),
+		HW_STAT(RxJumboPkts, RxJumboFramesOK),
+		HW_STAT(RxFCSErrors, RxFCSErrors),
+		HW_STAT(RxAlignErrors, RxAlignErrors),
+		HW_STAT(RxLongErrors, RxFrameTooLongErrors),
+		HW_STAT(RxVeryLongErrors, RxFrameTooLongErrors),
+		HW_STAT(RxPauseMacControlCounter, RxPauseFrames),
+		HW_STAT(RxDataErrors, RxDataErrors),
+		HW_STAT(RxJabberErrors, RxJabberErrors),
+		HW_STAT(RxRuntErrors, RxRuntErrors),
+		HW_STAT(RxShortErrors, RxRuntErrors),
+		HW_STAT(RxSequenceErrors, RxSequenceErrors),
+		HW_STAT(RxSymbolErrors, RxSymbolErrors),
+
+		/* Tx stats (skip collision stats as we are full-duplex only) */
+		HW_STAT(TxOctetsTotalOK, TxOctetsOK),
+		HW_STAT(TxOctetsBad, TxOctetsBad),
+		HW_STAT(TxUCPkts, TxUnicastFramesOK),
+		HW_STAT(TxMCPkts, TxMulticastFramesOK),
+		HW_STAT(TxBCPkts, TxBroadcastFramesOK),
+		HW_STAT(TxJumboPkts, TxJumboFramesOK),
+		HW_STAT(TxPauseFrames, TxPauseFrames),
+		HW_STAT(TxExcessiveLengthDrop, TxLengthErrors),
+		HW_STAT(TxUnderrun, TxUnderrun),
+		HW_STAT(TxCRCErrors, TxFCSErrors)
+	}, *p = hw_stats;
+	u64 *stats = (u64 *) &mac->stats;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
 
 
-	/* Rx stats */
-	RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
-	RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad);
-	RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
-	RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
-	RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
-	RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
-	RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
-	RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
-	RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
-	RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
-	RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
-	RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
-	RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
-	RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
-	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
-
-	/* Tx stats (skip collision stats as we are full-duplex only) */
-	RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
-	RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
-	RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
-	RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
-	RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
-	RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
-	RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
-	RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
-	RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
-	RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
+		t1_tpi_read(mac->adapter, MACREG(mac, p->reg), &val);
+		stats[p->offset] += val;
+	}
 }
 
 /* No-op interrupt operation as this MAC does not support interrupts */
@@ -273,7 +286,8 @@ static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
 static int mac_set_mtu(struct cmac *mac, int mtu)
 {
 	/* MAX_FRAME_SIZE inludes header + FCS, mtu doesn't */
-	if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL;
+	if (mtu > (MAX_FRAME_SIZE - 14 - 4))
+		return -EINVAL;
 	t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
 		     mtu + 14 + 4);
 	return 0;
@@ -357,8 +371,8 @@ static void enable_port(struct cmac *mac)
 	val |= (1 << index);
 	t1_tpi_write(adapter, REG_PORT_ENABLE, val);
 
-       	index <<= 2;
-        if (is_T2(adapter)) {
+	index <<= 2;
+	if (is_T2(adapter)) {
 		/* T204: set the Fifo water level & threshold */
 		t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
 		t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
@@ -389,6 +403,10 @@ static int mac_disable(struct cmac *mac, int which)
 	return 0;
 }
 
+#define RMON_UPDATE(mac, name, stat_name) \
+	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
+	(mac)->stats.stat_name += val;
+
 /*
  * This function is called periodically to accumulate the current values of the
  * RMON counters into the port statistics.  Since the counters are only 32 bits
@@ -460,10 +478,12 @@ static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
 	struct cmac *mac;
 	u32 val;
 
-	if (index > 9) return NULL;
+	if (index > 9)
+		return NULL;
 
 	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
-	if (!mac) return NULL;
+	if (!mac)
+		return NULL;
 
 	mac->ops = &ixf1010_ops;
 	mac->instance = (cmac_instance *)(mac + 1);

+ 13 - 14
drivers/net/chelsio/mv88e1xxx.c

@@ -73,9 +73,8 @@ static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
 
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
-		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-                }
+		if (is_T2(cphy->adapter))
+		    elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 	return 0;
@@ -92,9 +91,8 @@ static int mv88e1xxx_interrupt_disable(struct cphy *cphy)

 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer &= ~ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 		    elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
-                }
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 	return 0;
@@ -112,9 +110,8 @@ static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
 	if (t1_is_asic(cphy->adapter)) {
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-                }
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
 	}
 	return 0;
@@ -300,7 +297,7 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)

 	/*
 	 * Loop until cause reads zero. Need to handle bouncing interrupts.
-         */
+	 */
 	while (1) {
 		u32 cause;

@@ -308,15 +305,16 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
 				MV88E1XXX_INTERRUPT_STATUS_REGISTER,
 				&cause);
 		cause &= INTR_ENABLE_MASK;
-		if (!cause) break;
+		if (!cause)
+			break;

 		if (cause & MV88E1XXX_INTR_LINK_CHNG) {
 			(void) simple_mdio_read(cphy,
 				MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);

-			if (status & MV88E1XXX_INTR_LINK_CHNG) {
+			if (status & MV88E1XXX_INTR_LINK_CHNG)
 				cphy->state |= PHY_LINK_UP;
-			} else {
+			else {
 				cphy->state &= ~PHY_LINK_UP;
 				if (cphy->state & PHY_AUTONEG_EN)
 					cphy->state &= ~PHY_AUTONEG_RDY;
@@ -360,7 +358,8 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
 {
 	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

-	if (!cphy) return NULL;
+	if (!cphy)
+		return NULL;

 	cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);

@@ -377,11 +376,11 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
 	}
 	(void) mv88e1xxx_downshift_set(cphy, 1);   /* Enable downshift */

-        /* LED */
+	/* LED */
 	if (is_T2(adapter)) {
 		(void) simple_mdio_write(cphy,
 				MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
-        }
+	}

 	return cphy;
 }

+ 8 - 8
drivers/net/chelsio/my3126.c

@@ -10,25 +10,25 @@ static int my3126_reset(struct cphy *cphy, int wait)
 	 * This can be done through registers.  It is not required since
 	 * a full chip reset is used.
 	 */
-	return (0);
+	return 0;
 }

 static int my3126_interrupt_enable(struct cphy *cphy)
 {
 	schedule_delayed_work(&cphy->phy_update, HZ/30);
 	t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
-	return (0);
+	return 0;
 }

 static int my3126_interrupt_disable(struct cphy *cphy)
 {
 	cancel_rearming_delayed_work(&cphy->phy_update);
-	return (0);
+	return 0;
 }

 static int my3126_interrupt_clear(struct cphy *cphy)
 {
-	return (0);
+	return 0;
 }

 #define OFFSET(REG_ADDR)    (REG_ADDR << 2)
@@ -102,7 +102,7 @@ static void my3216_poll(struct work_struct *work)

 static int my3126_set_loopback(struct cphy *cphy, int on)
 {
-	return (0);
+	return 0;
 }

 /* To check the activity LED */
@@ -146,7 +146,7 @@ static int my3126_get_link_status(struct cphy *cphy,
 	if (fc)
 		*fc = PAUSE_RX | PAUSE_TX;

-	return (0);
+	return 0;
 }

 static void my3126_destroy(struct cphy *cphy)
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
 	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;

-	return (cphy);
+	return cphy;
 }

 /* Chip Reset */
@@ -198,7 +198,7 @@ static int my3126_phy_reset(adapter_t * adapter)
 	val |= 0x8000;
 	t1_tpi_write(adapter, A_ELMER0_GPO, val);
 	udelay(100);
-	return (0);
+	return 0;
 }

 struct gphy t1_my3126_ops = {

+ 52 - 39
drivers/net/chelsio/pm3393.c

@@ -446,17 +446,51 @@ static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
 		*val += 1ull << 40;
 }

-#define RMON_UPDATE(mac, name, stat_name) \
-	pm3393_rmon_update((mac)->adapter, OFFSET(name), 		\
-			   &(mac)->stats.stat_name,			\
-		   (ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
-
-
 static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 							      int flag)
 {
-	u64	ro;
-	u32	val0, val1, val2, val3;
+	static struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats [] = {
+
+#define HW_STAT(name, stat_name) \
+	{ name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
+		HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
+		HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
+		HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
+		HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
+		HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
+		HW_STAT(RxFramesLostDueToInternalMACErrors,
+				RxInternalMACRcvError),
+		HW_STAT(RxSymbolErrors, RxSymbolErrors),
+		HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
+		HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
+		HW_STAT(RxJabbers, RxJabberErrors),
+		HW_STAT(RxFragments, RxRuntErrors),
+		HW_STAT(RxUndersizedFrames, RxRuntErrors),
+		HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
+		HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
+
+		/* Tx stats */
+		HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
+		HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
+				TxInternalMACXmitError),
+		HW_STAT(TxTransmitSystemError, TxFCSErrors),
+		HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
+		HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
+		HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
+		HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
+		HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
+		HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
+	}, *p = hw_stats;
+	u64 ro;
+	u32 val0, val1, val2, val3;
+	u64 *stats = (u64 *) &mac->stats;
+	unsigned int i;

 	/* Snap the counters */
 	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
@@ -470,35 +504,14 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
 		(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);

-	/* Rx stats */
-	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
-	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
-	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
-	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
-	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
-	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
-	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
-				RxInternalMACRcvError);
-	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
-	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
-	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
-	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
-	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
-	RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
-	RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
-
-	/* Tx stats */
-	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
-	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
-				TxInternalMACXmitError);
-	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
-	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
-	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
-	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
-	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
-	RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
-	RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
+		unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
+
+		pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
+				   stats + p->offset, ro & (reg >> 2));
+	}
+
+

 	return &mac->stats;
 }
@@ -534,9 +547,9 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
 	/* Store local copy */
 	memcpy(cmac->instance->mac_addr, ma, 6);

-	lo = ((u32) ma[1] << 8) | (u32) ma[0];
+	lo  = ((u32) ma[1] << 8) | (u32) ma[0];
 	mid = ((u32) ma[3] << 8) | (u32) ma[2];
-	hi = ((u32) ma[5] << 8) | (u32) ma[4];
+	hi  = ((u32) ma[5] << 8) | (u32) ma[4];

 	/* Disable Rx/Tx MAC before configuring it. */
 	if (enabled)

+ 155 - 173
drivers/net/chelsio/sge.c

@@ -71,12 +71,9 @@
 #define SGE_FREEL_REFILL_THRESH	16
 #define SGE_RESPQ_E_N		1024
 #define SGE_INTRTIMER_NRES	1000
-#define SGE_RX_COPY_THRES	256
 #define SGE_RX_SM_BUF_SIZE	1536
 #define SGE_TX_DESC_MAX_PLEN	16384

-# define SGE_RX_DROP_THRES 2
-
 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

 /*
@@ -85,10 +82,6 @@
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)

-#ifndef NET_IP_ALIGN
-# define NET_IP_ALIGN 2
-#endif
-
 #define M_CMD_LEN       0x7fffffff
 #define V_CMD_LEN(v)    (v)
 #define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
@@ -195,7 +188,7 @@ struct cmdQ {
 	struct cmdQ_e  *entries;        /* HW command descriptor Q */
 	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
 	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
- 	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
+	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
 };

 struct freelQ {
@@ -241,9 +234,9 @@ struct sched_port {
 /* Per T204 device */
 struct sched {
 	ktime_t         last_updated;   /* last time quotas were computed */
-	unsigned int 	max_avail;	/* max bits to be sent to any port */
-	unsigned int 	port;		/* port index (round robin ports) */
-	unsigned int 	num;		/* num skbs in per port queues */
+	unsigned int	max_avail;	/* max bits to be sent to any port */
+	unsigned int	port;		/* port index (round robin ports) */
+	unsigned int	num;		/* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
 };
@@ -259,10 +252,10 @@ static void restart_sched(unsigned long);
  * contention.
  */
 struct sge {
-	struct adapter *adapter; 	/* adapter backpointer */
+	struct adapter *adapter;	/* adapter backpointer */
 	struct net_device *netdev;      /* netdevice backpointer */
-	struct freelQ 	freelQ[SGE_FREELQ_N]; /* buffer free lists */
-	struct respQ 	respQ;		/* response Q */
+	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ	respQ;		/* response Q */
 	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
 	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
 	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
@@ -460,7 +453,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (credits < MAX_SKB_FRAGS + 1)
 		goto out;

- again:
+again:
 	for (i = 0; i < MAX_NPORTS; i++) {
 		s->port = ++s->port & (MAX_NPORTS - 1);
 		skbq = &s->p[s->port].skbq;
@@ -483,8 +476,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (update-- && sched_update_avail(sge))
 		goto again;

- out:
- 	/* If there are more pending skbs, we use the hardware to schedule us
+out:
+	/* If there are more pending skbs, we use the hardware to schedule us
 	 * again.
 	 */
 	if (s->num && !skb) {
@@ -575,11 +568,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = (struct freelQ_e *)
-			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct freelQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -613,11 +605,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 	sge->respQ.size = SGE_RESPQ_E_N;
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
-	sge->respQ.entries = (struct respQ_e *)
+	sge->respQ.entries =
 		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
-	memset(sge->respQ.entries, 0, size);
 	return 0;

 err_no_mem:
@@ -637,20 +628,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 	q->in_use -= n;
 	ce = &q->centries[cidx];
 	while (n--) {
-		if (q->sop) {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_single(pdev,
-						 pci_unmap_addr(ce, dma_addr),
-			 			 pci_unmap_len(ce, dma_len),
-						 PCI_DMA_TODEVICE);
+		if (likely(pci_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+					 pci_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+			if (q->sop)
 				q->sop = 0;
-			}
-		} else {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-			 		       pci_unmap_len(ce, dma_len),
-					       PCI_DMA_TODEVICE);
-			}
 		}
 		if (ce->skb) {
 			dev_kfree_skb_any(ce->skb);
@@ -711,11 +694,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = (struct cmdQ_e *)
-			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct cmdQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -770,7 +752,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
 static void configure_sge(struct sge *sge, struct sge_params *p)
 {
 	struct adapter *ap = sge->adapter;
-	
+
 	writel(0, ap->regs + A_SG_CONTROL);
 	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
 			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +832,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 	struct freelQ_e *e = &q->entries[q->pidx];
 	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

-
 	while (q->credits < q->size) {
 		struct sk_buff *skb;
 		dma_addr_t mapping;
@@ -862,6 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		skb_reserve(skb, q->dma_offset);
 		mapping = pci_map_single(pdev, skb->data, dma_len,
 					 PCI_DMA_FROMDEVICE);
+		skb_reserve(skb, sge->rx_pkt_pad);
+
 		ce->skb = skb;
 		pci_unmap_addr_set(ce, dma_addr, mapping);
 		pci_unmap_len_set(ce, dma_len, dma_len);
@@ -881,7 +864,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		}
 		q->credits++;
 	}
-
 }

 /*
@@ -1041,6 +1023,10 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
 	}
 }

+static int copybreak __read_mostly = 256;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
 /**
  *	get_packet - return the next ingress packet buffer
  *	@pdev: the PCI device that received the packet
@@ -1060,45 +1046,42 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
  *	be copied but there is no memory for the copy.
  */
 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
-					 struct freelQ *fl, unsigned int len,
-					 int dma_pad, int skb_pad,
-					 unsigned int copy_thres,
-					 unsigned int drop_thres)
+					 struct freelQ *fl, unsigned int len)
 {
 	struct sk_buff *skb;
-	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
-	if (len < copy_thres) {
-		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			skb_reserve(skb, skb_pad);
-			skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(pdev,
-					    pci_unmap_addr(ce, dma_addr),
- 					    pci_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
-			memcpy(skb->data, ce->skb->data + dma_pad, len);
-			pci_dma_sync_single_for_device(pdev,
-					    pci_unmap_addr(ce, dma_addr),
- 					    pci_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
-		} else if (!drop_thres)
+	if (len < copybreak) {
+		skb = alloc_skb(len + 2, GFP_ATOMIC);
+		if (!skb)
 			goto use_orig_buf;

+		skb_reserve(skb, 2);	/* align IP header */
+		skb_put(skb, len);
+		pci_dma_sync_single_for_cpu(pdev,
+					    pci_unmap_addr(ce, dma_addr),
+					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		memcpy(skb->data, ce->skb->data, len);
+		pci_dma_sync_single_for_device(pdev,
+					       pci_unmap_addr(ce, dma_addr),
+					       pci_unmap_len(ce, dma_len),
+					       PCI_DMA_FROMDEVICE);
 		recycle_fl_buf(fl, fl->cidx);
 		return skb;
 	}

-	if (fl->credits < drop_thres) {
+use_orig_buf:
+	if (fl->credits < 2) {
 		recycle_fl_buf(fl, fl->cidx);
 		return NULL;
 	}

-use_orig_buf:
 	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
 			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
 	skb = ce->skb;
-	skb_reserve(skb, dma_pad);
+	prefetch(skb->data);
+
 	skb_put(skb, len);
 	return skb;
 }
@@ -1137,6 +1120,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
 {
 	unsigned int count = 0;
+
 	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
 		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
 		unsigned int i, len = skb->len - skb->data_len;
@@ -1343,7 +1327,7 @@ static void restart_sched(unsigned long arg)
 	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
 		unsigned int genbit, pidx, count;
 	        count = 1 + skb_shinfo(skb)->nr_frags;
-       		count += compute_large_page_tx_descs(skb);
+		count += compute_large_page_tx_descs(skb);
 		q->in_use += count;
 		genbit = q->genbit;
 		pidx = q->pidx;
@@ -1375,27 +1359,25 @@ static void restart_sched(unsigned long arg)
  *
  *	Process an ingress ethernet pakcet and deliver it to the stack.
  */
-static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 {
 	struct sk_buff *skb;
-	struct cpl_rx_pkt *p;
+	const struct cpl_rx_pkt *p;
 	struct adapter *adapter = sge->adapter;
 	struct sge_port_stats *st;

-	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
-			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
-			 SGE_RX_DROP_THRES);
+	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
 	if (unlikely(!skb)) {
 		sge->stats.rx_drops++;
-		return 0;
+		return;
 	}

-	p = (struct cpl_rx_pkt *)skb->data;
-	skb_pull(skb, sizeof(*p));
+	p = (const struct cpl_rx_pkt *) skb->data;
 	if (p->iff >= adapter->params.nports) {
 		kfree_skb(skb);
-		return 0;
+		return;
 	}
+	__skb_pull(skb, sizeof(*p));

 	skb->dev = adapter->port[p->iff].dev;
 	skb->dev->last_rx = jiffies;
@@ -1427,7 +1409,6 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 		netif_rx(skb);
 #endif
 	}
-	return 0;
 }

 /*
@@ -1448,29 +1429,28 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q)
 static void restart_tx_queues(struct sge *sge)
 {
 	struct adapter *adap = sge->adapter;
+	int i;
-	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
-		int i;
+	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+		return;
-		for_each_port(adap, i) {
-			struct net_device *nd = adap->port[i].dev;
+	for_each_port(adap, i) {
+		struct net_device *nd = adap->port[i].dev;
-			if (test_and_clear_bit(nd->if_port,
-					       &sge->stopped_tx_queues) &&
-			    netif_running(nd)) {
-				sge->stats.cmdQ_restarted[2]++;
-				netif_wake_queue(nd);
-			}
+		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+		    netif_running(nd)) {
+			sge->stats.cmdQ_restarted[2]++;
+			netif_wake_queue(nd);
 		}
 	}
 }

 /*
- * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 
+ * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
  * information.
  */
-static unsigned int update_tx_info(struct adapter *adapter, 
-					  unsigned int flags, 
+static unsigned int update_tx_info(struct adapter *adapter,
+					  unsigned int flags,
 					  unsigned int pr0)
 {
 	struct sge *sge = adapter->sge;
@@ -1510,29 +1490,30 @@ static int process_responses(struct adapter *adapter, int budget)
 	struct sge *sge = adapter->sge;
 	struct respQ *q = &sge->respQ;
 	struct respQ_e *e = &q->entries[q->cidx];
-	int budget_left = budget;
+	int done = 0;
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
-	
-	while (likely(budget_left && e->GenerationBit == q->genbit)) {
+	while (done < budget && e->GenerationBit == q->genbit) {
 		flags |= e->Qsleeping;
-		
+
 		cmdq_processed[0] += e->Cmdq0CreditReturn;
 		cmdq_processed[1] += e->Cmdq1CreditReturn;
-		
+
 		/* We batch updates to the TX side to avoid cacheline
 		 * ping-pong of TX state information on MP where the sender
 		 * might run on a different CPU than this function...
 		 */
-		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
+		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
 			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
 			cmdq_processed[0] = 0;
 		}
+
 		if (unlikely(cmdq_processed[1] > 16)) {
 			sge->cmdQ[1].processed += cmdq_processed[1];
 			cmdq_processed[1] = 0;
 		}
+
 		if (likely(e->DataValid)) {
 			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

@@ -1542,12 +1523,16 @@ static int process_responses(struct adapter *adapter, int budget)
 			else
 				sge_rx(sge, fl, e->BufferLength);

+			++done;
+
 			/*
 			 * Note: this depends on each packet consuming a
 			 * single free-list buffer; cf. the BUG above.
 			 */
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
+			prefetch(fl->centries[fl->cidx].skb);
+
 			if (unlikely(--fl->credits <
 				     fl->size - SGE_FREEL_REFILL_THRESH))
 				refill_free_list(sge, fl);
@@ -1566,14 +1551,20 @@ static int process_responses(struct adapter *adapter, int budget)
 			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
 			q->credits = 0;
 		}
-		--budget_left;
 	}

-	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
 	sge->cmdQ[1].processed += cmdq_processed[1];

-	budget -= budget_left;
-	return budget;
+	return done;
+}
+
+static inline int responses_pending(const struct adapter *adapter)
+{
+	const struct respQ *Q = &adapter->sge->respQ;
+	const struct respQ_e *e = &Q->entries[Q->cidx];
+
+	return (e->GenerationBit == Q->genbit);
 }

 #ifdef CONFIG_CHELSIO_T1_NAPI
@@ -1585,19 +1576,25 @@ static int process_responses(struct adapter *adapter, int budget)
  * which the caller must ensure is a valid pure response.  Returns 1 if it
  * encounters a valid data-carrying response, 0 otherwise.
  */
-static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+static int process_pure_responses(struct adapter *adapter)
 {
 	struct sge *sge = adapter->sge;
 	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
+	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

+	prefetch(fl->centries[fl->cidx].skb);
+	if (e->DataValid)
+		return 1;
+
 	do {
 		flags |= e->Qsleeping;

 		cmdq_processed[0] += e->Cmdq0CreditReturn;
 		cmdq_processed[1] += e->Cmdq1CreditReturn;
-		
+
 		e++;
 		if (unlikely(++q->cidx == q->size)) {
 			q->cidx = 0;
@@ -1613,7 +1610,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 		sge->stats.pure_rsps++;
 	} while (e->GenerationBit == q->genbit && !e->DataValid);

-	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
 	sge->cmdQ[1].processed += cmdq_processed[1];

 	return e->GenerationBit == q->genbit;
@@ -1627,23 +1624,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
-	int effective_budget = min(*budget, dev->quota);
-	int work_done = process_responses(adapter, effective_budget);
+	int work_done;
+	work_done = process_responses(adapter, min(*budget, dev->quota));
 	*budget -= work_done;
 	dev->quota -= work_done;

-	if (work_done >= effective_budget)
+	if (unlikely(responses_pending(adapter)))
 		return 1;

- 	spin_lock_irq(&adapter->async_lock);
-	__netif_rx_complete(dev);
+	netif_rx_complete(dev);
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
- 	spin_unlock_irq(&adapter->async_lock);

 	return 0;
+
 }

 /*
@@ -1652,44 +1646,33 @@ int t1_poll(struct net_device *dev, int *budget)
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
- 	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
- 	u32 cause;
-	int handled = 0;
+	int handled;
-	cause = readl(adapter->regs + A_PL_CAUSE);
-	if (cause == 0 || cause == ~0)
-		return IRQ_NONE;
+	if (likely(responses_pending(adapter))) {
+		struct net_device *dev = sge->netdev;
-	spin_lock(&adapter->async_lock);
- 	if (cause & F_PL_INTR_SGE_DATA) {
-		struct respQ *q = &adapter->sge->respQ;
-		struct respQ_e *e = &q->entries[q->cidx];
-
- 		handled = 1;
- 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
-		if (e->GenerationBit == q->genbit &&
-		    __netif_rx_schedule_prep(dev)) {
-			if (e->DataValid || process_pure_responses(adapter, e)) {
-				/* mask off data IRQ */
-				writel(adapter->slow_intr_mask,
-				       adapter->regs + A_PL_ENABLE);
-				__netif_rx_schedule(sge->netdev);
-				goto unlock;
-			}
-			/* no data, no NAPI needed */
-			netif_poll_enable(dev);
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+		if (__netif_rx_schedule_prep(dev)) {
+			if (process_pure_responses(adapter))
+				__netif_rx_schedule(dev);
+			else {
+				/* no data, no NAPI needed */
+				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+				netif_poll_enable(dev);	/* undo schedule_prep */
+			}
 		}
-		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else
-		handled = t1_slow_intr_handler(adapter);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&adapter->async_lock);
+	handled = t1_slow_intr_handler(adapter);
+	spin_unlock(&adapter->async_lock);

 	if (!handled)
 		sge->stats.unhandled_irqs++;
-unlock:
-	spin_unlock(&adapter->async_lock);
+
 	return IRQ_RETVAL(handled != 0);
 }

@@ -1712,17 +1695,13 @@ unlock:
 irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
-	struct respQ_e *e;
 	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;

 	spin_lock(&adapter->async_lock);
-	e = &Q->entries[Q->cidx];
-	prefetch(e);

 	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

-	if (likely(e->GenerationBit == Q->genbit))
+	if (likely(responses_pending(adapter)))
 		work_done = process_responses(adapter, -1);
 	else
 		work_done = t1_slow_intr_handler(adapter);
@@ -1796,7 +1775,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	 * through the scheduler.
 	 */
 	if (sge->tx_sched && !qid && skb->dev) {
-	use_sched:
+use_sched:
 		use_sched_skb = 1;
 		/* Note that the scheduler might return a different skb than
 		 * the one passed in.
@@ -1900,7 +1879,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		cpl = (struct cpl_tx_pkt *)hdr;
 	} else {
 		/*
-	 	 * Packets shorter than ETH_HLEN can break the MAC, drop them
+		 * Packets shorter than ETH_HLEN can break the MAC, drop them
 		 * early.  Also, we may get oversized packets because some
 		 * parts of the kernel don't handle our unusual hard_header_len
 		 * right, drop those too.
@@ -1984,9 +1963,9 @@ send:
 	 * then silently discard to avoid leak.
 	 */
 	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
- 		dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(skb);
 		ret = NETDEV_TX_OK;
- 	}
+	}
 	return ret;
 }

@@ -2099,31 +2078,35 @@ static void espibug_workaround_t204(unsigned long data)

 	if (adapter->open_device_map & PORT_MASK) {
 		int i;
-		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) {
+
+		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
 			return;
-		}
+
 		for (i = 0; i < nports; i++) {
-	        	struct sk_buff *skb = sge->espibug_skb[i];
-			if ( (netif_running(adapter->port[i].dev)) &&
-			     !(netif_queue_stopped(adapter->port[i].dev)) &&
-			     (seop[i] && ((seop[i] & 0xfff) == 0)) &&
-			     skb ) {
-	                	if (!skb->cb[0]) {
-	                        	u8 ch_mac_addr[ETH_ALEN] =
-	                            	{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-	                        	memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-	                               	ch_mac_addr, ETH_ALEN);
-	                        	memcpy(skb->data + skb->len - 10,
-						ch_mac_addr, ETH_ALEN);
-	                        	skb->cb[0] = 0xff;
-	                	}
-
-	                	/* bump the reference count to avoid freeing of
-	                 	 * the skb once the DMA has completed.
-	                 	 */
-	                	skb = skb_get(skb);
-	                	t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
+			struct sk_buff *skb = sge->espibug_skb[i];
+
+			if (!netif_running(adapter->port[i].dev) ||
+			    netif_queue_stopped(adapter->port[i].dev) ||
+			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+				continue;
+
+			if (!skb->cb[0]) {
+				u8 ch_mac_addr[ETH_ALEN] = {
+					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+				};
+
+				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+					ch_mac_addr, ETH_ALEN);
+				memcpy(skb->data + skb->len - 10,
+					ch_mac_addr, ETH_ALEN);
+				skb->cb[0] = 0xff;
 			}
+
+			/* bump the reference count to avoid freeing of
+			 * the skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
 		}
 	}
 	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
@@ -2192,9 +2175,8 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 		if (adapter->params.nports > 1) {
 			tx_sched_init(sge);
 			sge->espibug_timer.function = espibug_workaround_t204;
-		} else {
+		} else
 			sge->espibug_timer.function = espibug_workaround;
-		}
 		sge->espibug_timer.data = (unsigned long)sge->adapter;

 		sge->espibug_timeout = 1;
@@ -2202,7 +2184,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 		if (adapter->params.nports > 1)
 			sge->espibug_timeout = HZ/100;
 	}
-	 
+

 	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
 	p->cmdQ_size[1] = SGE_CMDQ1_E_N;

+ 45 - 44
drivers/net/chelsio/subr.c

@@ -223,13 +223,13 @@ static int fpga_slow_intr(adapter_t *adapter)
 		t1_sge_intr_error_handler(adapter->sge);

 	if (cause & FPGA_PCIX_INTERRUPT_GMAC)
-                fpga_phy_intr_handler(adapter);
+		fpga_phy_intr_handler(adapter);

 	if (cause & FPGA_PCIX_INTERRUPT_TP) {
-                /*
+		/*
 		 * FPGA doesn't support MC4 interrupts and it requires
 		 * this odd layer of indirection for MC5.
-                 */
+		 */
 		u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);

 		/* Clear TP interrupt */
@@ -262,8 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
 			udelay(10);
 	} while (busy && --attempts);
 	if (busy)
-		CH_ALERT("%s: MDIO operation timed out\n",
-			 adapter->name);
+		CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
 	return busy;
 }

@@ -605,22 +604,23 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)

 	switch (board_info(adapter)->board) {
 #ifdef CONFIG_CHELSIO_T1_1G
-        case CHBT_BOARD_CHT204:
-        case CHBT_BOARD_CHT204E:
-        case CHBT_BOARD_CHN204:
-        case CHBT_BOARD_CHT204V: {
-                int i, port_bit;
+	case CHBT_BOARD_CHT204:
+	case CHBT_BOARD_CHT204E:
+	case CHBT_BOARD_CHN204:
+	case CHBT_BOARD_CHT204V: {
+		int i, port_bit;
 		for_each_port(adapter, i) {
 			port_bit = i + 1;
-			if (!(cause & (1 << port_bit))) continue;
+			if (!(cause & (1 << port_bit)))
+				continue;
-	                phy = adapter->port[i].phy;
+			phy = adapter->port[i].phy;
 			phy_cause = phy->ops->interrupt_handler(phy);
 			if (phy_cause & cphy_cause_link_change)
 				t1_link_changed(adapter, i);
 		}
-                break;
-        }
+		break;
+	}
 	case CHBT_BOARD_CHT101:
 		if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
 			phy = adapter->port[0].phy;
@@ -631,13 +631,13 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
 		break;
 	case CHBT_BOARD_7500: {
 		int p;
-    		/*
+		/*
 		 * Elmer0's interrupt cause isn't useful here because there is
 		 * only one bit that can be set for all 4 ports.  This means
 		 * we are forced to check every PHY's interrupt status
 		 * register to see who initiated the interrupt.
-     		 */
-    		for_each_port(adapter, p) {
+		 */
+		for_each_port(adapter, p) {
 			phy = adapter->port[p].phy;
 			phy_cause = phy->ops->interrupt_handler(phy);
 			if (phy_cause & cphy_cause_link_change)
@@ -658,7 +658,7 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
 		break;
 	case CHBT_BOARD_8000:
 	case CHBT_BOARD_CHT110:
-    		CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
+		CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
 		       cause);
 		if (cause & ELMER0_GP_BIT1) {        /* PMC3393 INTB */
 			struct cmac *mac = adapter->port[0].mac;
@@ -670,9 +670,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)

 			t1_tpi_read(adapter,
 					A_ELMER0_GPI_STAT, &mod_detect);
-	    		CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
+			CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
 			       mod_detect ? "removed" : "inserted");
-    		}
+		}
 		break;
 #ifdef CONFIG_CHELSIO_T1_COUGAR
 	case CHBT_BOARD_COUGAR:
@@ -688,7 +688,8 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)

 			for_each_port(adapter, i) {
 				port_bit = i ? i + 1 : 0;
-				if (!(cause & (1 << port_bit))) continue;
+				if (!(cause & (1 << port_bit)))
+					continue;

 				phy = adapter->port[i].phy;
 				phy_cause = phy->ops->interrupt_handler(phy);
@@ -755,7 +756,7 @@ void t1_interrupts_disable(adapter_t* adapter)

 	/* Disable PCIX & external chip interrupts. */
 	if (t1_is_asic(adapter))
-	    	writel(0, adapter->regs + A_PL_ENABLE);
+		writel(0, adapter->regs + A_PL_ENABLE);

 	/* PCI-X interrupts */
 	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -830,11 +831,11 @@ int t1_slow_intr_handler(adapter_t *adapter)
 /* Power sequencing is a work-around for Intel's XPAKs. */
 static void power_sequence_xpak(adapter_t* adapter)
 {
-    	u32 mod_detect;
-    	u32 gpo;
+	u32 mod_detect;
+	u32 gpo;
-    	/* Check for XPAK */
-    	t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
+	/* Check for XPAK */
+	t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
 	if (!(ELMER0_GP_BIT5 & mod_detect)) {
 		/* XPAK is present */
 		t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
@@ -877,31 +878,31 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
 	case CHBT_BOARD_N210:
 	case CHBT_BOARD_CHT210:
 	case CHBT_BOARD_COUGAR:
-    		t1_tpi_par(adapter, 0xf);
-    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
 		break;
 	case CHBT_BOARD_CHT110:
-    		t1_tpi_par(adapter, 0xf);
-    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
-    		/* TBD XXX Might not need.  This fixes a problem
-     		 *         described in the Intel SR XPAK errata.
-     		 */
-    		power_sequence_xpak(adapter);
+		/* TBD XXX Might not need.  This fixes a problem
+		 *         described in the Intel SR XPAK errata.
+		 */
+		power_sequence_xpak(adapter);
 		break;
 #ifdef CONFIG_CHELSIO_T1_1G
-    case CHBT_BOARD_CHT204E:
-		        /* add config space write here */
+	case CHBT_BOARD_CHT204E:
+		/* add config space write here */
 	case CHBT_BOARD_CHT204:
 	case CHBT_BOARD_CHT204V:
 	case CHBT_BOARD_CHN204:
-                t1_tpi_par(adapter, 0xf);
-                t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
-                break;
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
+		break;
 	case CHBT_BOARD_CHT101:
 	case CHBT_BOARD_7500:
-    		t1_tpi_par(adapter, 0xf);
-    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
 		break;
 #endif
 	}
@@ -941,7 +942,7 @@ int t1_init_hw_modules(adapter_t *adapter)
 		goto out_err;

 	err = 0;
- out_err:
+out_err:
 	return err;
 }

@@ -983,7 +984,7 @@ void t1_free_sw_modules(adapter_t *adapter)
 	if (adapter->espi)
 		t1_espi_destroy(adapter->espi);
 #ifdef CONFIG_CHELSIO_T1_COUGAR
-        if (adapter->cspi)
+	if (adapter->cspi)
 		t1_cspi_destroy(adapter->cspi);
 #endif
 }
@@ -1010,7 +1011,7 @@ static void __devinit init_link_config(struct link_config *lc,
 		CH_ERR("%s: CSPI initialization failed\n",
 		       adapter->name);
 		goto error;
-        }
+	}
 #endif

 /*

+ 30 - 32
drivers/net/chelsio/tp.c

@@ -17,39 +17,36 @@ struct petp {
 static void tp_init(adapter_t * ap, const struct tp_params *p,
 		    unsigned int tp_clk)
 {
-	if (t1_is_asic(ap)) {
-		u32 val;
-
-		val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
-		    F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
-		if (!p->pm_size)
-			val |= F_OFFLOAD_DISABLE;
-		else
-			val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
-			    F_TP_IN_ESPI_CHECK_TCP_CSUM;
-		writel(val, ap->regs + A_TP_IN_CONFIG);
-		writel(F_TP_OUT_CSPI_CPL |
-		       F_TP_OUT_ESPI_ETHERNET |
-		       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
-		       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
-		       ap->regs + A_TP_OUT_CONFIG);
-		writel(V_IP_TTL(64) |
-		       F_PATH_MTU /* IP DF bit */  |
-		       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
-		       V_SYN_COOKIE_PARAMETER(29),
-		       ap->regs + A_TP_GLOBAL_CONFIG);
-		/*
-		 * Enable pause frame deadlock prevention.
-		 */
-		if (is_T2(ap) && ap->params.nports > 1) {
-			u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
-
-			writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
-			       V_DROP_TICKS_CNT(drop_ticks) |
-			       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
-			       ap->regs + A_TP_TX_DROP_CONFIG);
-		}
+	u32 val;
+	if (!t1_is_asic(ap))
+		return;
+
+	val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+		F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+	if (!p->pm_size)
+		val |= F_OFFLOAD_DISABLE;
+	else
+		val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
+	writel(val, ap->regs + A_TP_IN_CONFIG);
+	writel(F_TP_OUT_CSPI_CPL |
+	       F_TP_OUT_ESPI_ETHERNET |
+	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
+	writel(V_IP_TTL(64) |
+	       F_PATH_MTU /* IP DF bit */  |
+	       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
+	       V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
+	/*
+	 * Enable pause frame deadlock prevention.
+	 */
+	if (is_T2(ap) && ap->params.nports > 1) {
+		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+		       V_DROP_TICKS_CNT(drop_ticks) |
+		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+		       ap->regs + A_TP_TX_DROP_CONFIG);
 	}
 }

@@ -61,6 +58,7 @@ void t1_tp_destroy(struct petp *tp)
 struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
 {
 	struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+
 	if (!tp)
 		return NULL;


+ 72 - 67
drivers/net/chelsio/vsc7326.c

@@ -226,22 +226,21 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
 		if (ib[i].addr == INITBLOCK_SLEEP) {
 			udelay( ib[i].data );
 			CH_ERR("sleep %d us\n",ib[i].data);
-		} else {
+		} else
 			vsc_write( adapter, ib[i].addr, ib[i].data );
-		}
 	}
 }

 static int bist_rd(adapter_t *adapter, int moduleid, int address)
 {
-	int data=0;
-	u32 result=0;
-
-	if(	(address != 0x0) &&
-		(address != 0x1) &&
-		(address != 0x2) &&
-		(address != 0xd) &&
-		(address != 0xe))
+	int data = 0;
+	u32 result = 0;
+
+	if ((address != 0x0) &&
+	    (address != 0x1) &&
+	    (address != 0x2) &&
+	    (address != 0xd) &&
+	    (address != 0xe))
 			CH_ERR("No bist address: 0x%x\n", address);

 	data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
@@ -251,27 +250,27 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
 	udelay(10);

 	vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
-	if((result & (1<<9)) != 0x0)
+	if ((result & (1 << 9)) != 0x0)
 		CH_ERR("Still in bist read: 0x%x\n", result);
-	else if((result & (1<<8)) != 0x0)
+	else if ((result & (1 << 8)) != 0x0)
 		CH_ERR("bist read error: 0x%x\n", result);

-	return(result & 0xff);
+	return (result & 0xff);
 }

 static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
 {
-	int data=0;
-	u32 result=0;
-
-	if(	(address != 0x0) &&
-		(address != 0x1) &&
-		(address != 0x2) &&
-		(address != 0xd) &&
-		(address != 0xe))
+	int data = 0;
+	u32 result = 0;
+
+	if ((address != 0x0) &&
+	    (address != 0x1) &&
+	    (address != 0x2) &&
+	    (address != 0xd) &&
+	    (address != 0xe))
 			CH_ERR("No bist address: 0x%x\n", address);

-	if( value>255 )
+	if (value > 255)
 		CH_ERR("Suspicious write out of range value: 0x%x\n", value);

 	data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
@@ -281,12 +280,12 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
 	udelay(5);

 	vsc_read(adapter, REG_RAM_BIST_CMD, &result);
-	if((result & (1<<27)) != 0x0)
+	if ((result & (1 << 27)) != 0x0)
 		CH_ERR("Still in bist write: 0x%x\n", result);
 		CH_ERR("Still in bist write: 0x%x\n", result);
-	else if((result & (1<<26)) != 0x0)
+	else if ((result & (1 << 26)) != 0x0)
 		CH_ERR("bist write error: 0x%x\n", result);
 		CH_ERR("bist write error: 0x%x\n", result);
 
 
-	return(0);
+	return 0;
 }

 static int run_bist(adapter_t *adapter, int moduleid)
@@ -295,7 +294,7 @@ static int run_bist(adapter_t *adapter, int moduleid)
 	(void) bist_wr(adapter,moduleid, 0x00, 0x02);
 	(void) bist_wr(adapter,moduleid, 0x01, 0x01);

-	return(0);
+	return 0;
 }

 static int check_bist(adapter_t *adapter, int moduleid)
@@ -309,27 +308,26 @@ static int check_bist(adapter_t *adapter, int moduleid)
 	if ((result & 3) != 0x3)
 		CH_ERR("Result: 0x%x  BIST error in ram %d, column: 0x%04x\n",
 			result, moduleid, column);
-	return(0);
+	return 0;
 }

 static int enable_mem(adapter_t *adapter, int moduleid)
 {
 	/*enable mem*/
 	(void) bist_wr(adapter,moduleid, 0x00, 0x00);
-	return(0);
+	return 0;
 }

 static int run_bist_all(adapter_t *adapter)
 {
-	int port=0;
-	u32 val=0;
+	int port = 0;
+	u32 val = 0;

 	vsc_write(adapter, REG_MEM_BIST, 0x5);
 	vsc_read(adapter, REG_MEM_BIST, &val);

-	for(port=0; port<12; port++){
+	for (port = 0; port < 12; port++)
 		vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
-	}

 	udelay(300);
 	vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
@@ -352,13 +350,13 @@ static int run_bist_all(adapter_t *adapter)
 	udelay(300);
 	vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
 	udelay(300);
-	for(port=0; port<12; port++){
+	for (port = 0; port < 12; port++)
 		vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
-	}
+
 	udelay(300);
 	vsc_write(adapter, REG_MEM_BIST, 0x0);
 	mdelay(10);
-	return(0);
+	return 0;
 }

 static int mac_intr_handler(struct cmac *mac)
@@ -591,40 +589,46 @@ static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)

 static void port_stats_update(struct cmac *mac)
 {
-	int port = mac->instance->index;
+	struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats[] = {
+
+#define HW_STAT(reg, stat_name) \
+	{ reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxUnicast, RxUnicastFramesOK),
+		HW_STAT(RxMulticast, RxMulticastFramesOK),
+		HW_STAT(RxBroadcast, RxBroadcastFramesOK),
+		HW_STAT(Crc, RxFCSErrors),
+		HW_STAT(RxAlignment, RxAlignErrors),
+		HW_STAT(RxOversize, RxFrameTooLongErrors),
+		HW_STAT(RxPause, RxPauseFrames),
+		HW_STAT(RxJabbers, RxJabberErrors),
+		HW_STAT(RxFragments, RxRuntErrors),
+		HW_STAT(RxUndersize, RxRuntErrors),
+		HW_STAT(RxSymbolCarrier, RxSymbolErrors),
+		HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
+
+		/* Tx stats (skip collision stats as we are full-duplex only) */
+		HW_STAT(TxUnicast, TxUnicastFramesOK),
+		HW_STAT(TxMulticast, TxMulticastFramesOK),
+		HW_STAT(TxBroadcast, TxBroadcastFramesOK),
+		HW_STAT(TxPause, TxPauseFrames),
+		HW_STAT(TxUnderrun, TxUnderrun),
+		HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
+	}, *p = hw_stats;
+	unsigned int port = mac->instance->index;
+	u64 *stats = (u64 *)&mac->stats;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++)
+		rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);

-	/* Rx stats */
+	rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
 	rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
 	rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
-	rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
-	rmon_update(mac, REG_RX_MULTICAST(port),
-		    &mac->stats.RxMulticastFramesOK);
-	rmon_update(mac, REG_RX_BROADCAST(port),
-		    &mac->stats.RxBroadcastFramesOK);
-	rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
-	rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
-	rmon_update(mac, REG_RX_OVERSIZE(port),
-		    &mac->stats.RxFrameTooLongErrors);
-	rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
-	rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
-	rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
-	rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
-	rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
-		    &mac->stats.RxSymbolErrors);
-	rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
-            &mac->stats.RxJumboFramesOK);
-
-	/* Tx stats (skip collision stats as we are full-duplex only) */
-	rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
-	rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
-	rmon_update(mac, REG_TX_MULTICAST(port),
-		    &mac->stats.TxMulticastFramesOK);
-	rmon_update(mac, REG_TX_BROADCAST(port),
-		    &mac->stats.TxBroadcastFramesOK);
-	rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
-	rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
-	rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
-            &mac->stats.TxJumboFramesOK);
 }

 /*
@@ -686,7 +690,8 @@ static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
 	int i;

 	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
-	if (!mac) return NULL;
+	if (!mac)
+		return NULL;

 	mac->ops = &vsc7326_ops;
 	mac->instance = (cmac_instance *)(mac + 1);
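
port_stats_update() above replaces a long run of per-counter rmon_update() calls with a table walk. Its HW_STAT macro computes the index of a u64 member inside struct cmac_statistics by subtracting NULL pointers; offsetof(...)/sizeof(u64) is the standards-blessed spelling of the same trick. Note also that, as committed here, the loop never advances p, so each iteration reuses the first table entry; indexing the table explicitly, as in this stand-alone sketch with invented field names, avoids that trap:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long long RxUnicastFramesOK;
	unsigned long long RxFCSErrors;
	unsigned long long TxUnicastFramesOK;
};

/* offsetof()/sizeof() is the portable form of HW_STAT's NULL subtraction */
#define DEMO_STAT(reg, field) \
	{ reg, offsetof(struct demo_stats, field) / sizeof(unsigned long long) }

static const struct {
	unsigned int reg;
	unsigned int offset;
} demo_hw_stats[] = {
	DEMO_STAT(0x06, RxUnicastFramesOK),
	DEMO_STAT(0x09, RxFCSErrors),
	DEMO_STAT(0x1b, TxUnicastFramesOK),
};

int main(void)
{
	struct demo_stats s = { 0 };
	unsigned long long *flat = (unsigned long long *)&s;
	size_t i;

	/* address each member through the flat u64 view, as the driver does */
	for (i = 0; i < sizeof(demo_hw_stats) / sizeof(demo_hw_stats[0]); i++)
		flat[demo_hw_stats[i].offset] += 1;	/* stand-in for rmon_update() */

	printf("%llu %llu %llu\n",
	       s.RxUnicastFramesOK, s.RxFCSErrors, s.TxUnicastFramesOK);
	return 0;
}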

+ 75 - 64
drivers/net/chelsio/vsc7326_reg.h

@@ -192,73 +192,84 @@
 #define REG_HDX(pn)		CRA(0x1,pn,0x19)	/* Half-duplex config */

 /* Statistics */
+/* CRA(0x4,pn,reg) */
+/* reg below */
 /* pn = port number, 0-a, a = 10GbE */
-#define REG_RX_IN_BYTES(pn)	CRA(0x4,pn,0x00)	/* # Rx in octets */
-#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01)	/* Frames w/ symbol errors */
-#define REG_RX_PAUSE(pn)	CRA(0x4,pn,0x02)	/* # pause frames received */
-#define REG_RX_UNSUP_OPCODE(pn)	CRA(0x4,pn,0x03)	/* # control frames with unsupported opcode */
-#define REG_RX_OK_BYTES(pn)	CRA(0x4,pn,0x04)	/* # octets in good frames */
-#define REG_RX_BAD_BYTES(pn)	CRA(0x4,pn,0x05)	/* # octets in bad frames */
-#define REG_RX_UNICAST(pn)	CRA(0x4,pn,0x06)	/* # good unicast frames */
-#define REG_RX_MULTICAST(pn)	CRA(0x4,pn,0x07)	/* # good multicast frames */
-#define REG_RX_BROADCAST(pn)	CRA(0x4,pn,0x08)	/* # good broadcast frames */
-#define REG_CRC(pn)		CRA(0x4,pn,0x09)	/* # frames w/ bad CRC only */
-#define REG_RX_ALIGNMENT(pn)	CRA(0x4,pn,0x0a)	/* # frames w/ alignment err */
-#define REG_RX_UNDERSIZE(pn)	CRA(0x4,pn,0x0b)	/* # frames undersize */
-#define REG_RX_FRAGMENTS(pn)	CRA(0x4,pn,0x0c)	/* # frames undersize w/ crc err */
-#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d)	/* # frames with length error */
-#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e)	/* # frames with illegal length field */
-#define REG_RX_OVERSIZE(pn)	CRA(0x4,pn,0x0f)	/* # frames oversize */
-#define REG_RX_JABBERS(pn)	CRA(0x4,pn,0x10)	/* # frames oversize w/ crc err */
-#define REG_RX_SIZE_64(pn)	CRA(0x4,pn,0x11)	/* # frames 64 octets long */
-#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12)	/* # frames 65-127 octets */
-#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13)	/* # frames 128-255 */
-#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14)	/* # frames 256-511 */
-#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15)	/* # frames 512-1023 */
-#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16)	/* # frames 1024-1518 */
-#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17)	/* # frames 1519-max */

-#define REG_TX_OUT_BYTES(pn)	CRA(0x4,pn,0x18)	/* # octets tx */
-#define REG_TX_PAUSE(pn)	CRA(0x4,pn,0x19)	/* # pause frames sent */
-#define REG_TX_OK_BYTES(pn)	CRA(0x4,pn,0x1a)	/* # octets tx OK */
-#define REG_TX_UNICAST(pn)	CRA(0x4,pn,0x1b)	/* # frames unicast */
-#define REG_TX_MULTICAST(pn)	CRA(0x4,pn,0x1c)	/* # frames multicast */
-#define REG_TX_BROADCAST(pn)	CRA(0x4,pn,0x1d)	/* # frames broadcast */
-#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e)	/* # frames tx after multiple collisions */
-#define REG_TX_LATE_COLL(pn)	CRA(0x4,pn,0x1f)	/* # late collisions detected */
-#define REG_TX_XCOLL(pn)	CRA(0x4,pn,0x20)	/* # frames lost, excessive collisions */
-#define REG_TX_DEFER(pn)	CRA(0x4,pn,0x21)	/* # frames deferred on first tx attempt */
-#define REG_TX_XDEFER(pn)	CRA(0x4,pn,0x22)	/* # frames excessively deferred */
-#define REG_TX_CSENSE(pn)	CRA(0x4,pn,0x23)	/* carrier sense errors at frame end */
-#define REG_TX_SIZE_64(pn)	CRA(0x4,pn,0x24)	/* # frames 64 octets long */
-#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25)	/* # frames 65-127 octets */
-#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26)	/* # frames 128-255 */
-#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27)	/* # frames 256-511 */
-#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28)	/* # frames 512-1023 */
-#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29)	/* # frames 1024-1518 */
-#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a)	/* # frames 1519-max */
-#define REG_TX_SINGLE_COLL(pn)	CRA(0x4,pn,0x2b)	/* # frames tx after single collision */
-#define REG_TX_BACKOFF2(pn)	CRA(0x4,pn,0x2c)	/* # frames tx ok after 2 backoffs/collisions */
-#define REG_TX_BACKOFF3(pn)	CRA(0x4,pn,0x2d)	/*   after 3 backoffs/collisions */
-#define REG_TX_BACKOFF4(pn)	CRA(0x4,pn,0x2e)	/*   after 4 */
-#define REG_TX_BACKOFF5(pn)	CRA(0x4,pn,0x2f)	/*   after 5 */
-#define REG_TX_BACKOFF6(pn)	CRA(0x4,pn,0x30)	/*   after 6 */
-#define REG_TX_BACKOFF7(pn)	CRA(0x4,pn,0x31)	/*   after 7 */
-#define REG_TX_BACKOFF8(pn)	CRA(0x4,pn,0x32)	/*   after 8 */
-#define REG_TX_BACKOFF9(pn)	CRA(0x4,pn,0x33)	/*   after 9 */
-#define REG_TX_BACKOFF10(pn)	CRA(0x4,pn,0x34)	/*   after 10 */
-#define REG_TX_BACKOFF11(pn)	CRA(0x4,pn,0x35)	/*   after 11 */
-#define REG_TX_BACKOFF12(pn)	CRA(0x4,pn,0x36)	/*   after 12 */
-#define REG_TX_BACKOFF13(pn)	CRA(0x4,pn,0x37)	/*   after 13 */
-#define REG_TX_BACKOFF14(pn)	CRA(0x4,pn,0x38)	/*   after 14 */
-#define REG_TX_BACKOFF15(pn)	CRA(0x4,pn,0x39)	/*   after 15 */
-#define REG_TX_UNDERRUN(pn)	CRA(0x4,pn,0x3a)	/* # frames dropped from underrun */
-#define REG_RX_XGMII_PROT_ERR	CRA(0x4,0xa,0x3b)	/* # protocol errors detected on XGMII interface */
-#define REG_RX_IPG_SHRINK(pn)	CRA(0x4,pn,0x3c)	/* # of IPG shrinks detected */
+enum {
+	RxInBytes		= 0x00,	// # Rx in octets
+	RxSymbolCarrier		= 0x01,	// Frames w/ symbol errors
+	RxPause			= 0x02,	// # pause frames received
+	RxUnsupOpcode		= 0x03,	// # control frames with unsupported opcode
+	RxOkBytes		= 0x04,	// # octets in good frames
+	RxBadBytes		= 0x05,	// # octets in bad frames
+	RxUnicast		= 0x06,	// # good unicast frames
+	RxMulticast		= 0x07,	// # good multicast frames
+	RxBroadcast		= 0x08,	// # good broadcast frames
+	Crc			= 0x09,	// # frames w/ bad CRC only
+	RxAlignment		= 0x0a,	// # frames w/ alignment err
+	RxUndersize		= 0x0b,	// # frames undersize
+	RxFragments		= 0x0c,	// # frames undersize w/ crc err
+	RxInRangeLengthError	= 0x0d,	// # frames with length error
+	RxOutOfRangeError	= 0x0e,	// # frames with illegal length field
+	RxOversize		= 0x0f,	// # frames oversize
+	RxJabbers		= 0x10,	// # frames oversize w/ crc err
+	RxSize64		= 0x11,	// # frames 64 octets long
+	RxSize65To127		= 0x12,	// # frames 65-127 octets
+	RxSize128To255		= 0x13,	// # frames 128-255
+	RxSize256To511		= 0x14,	// # frames 256-511
+	RxSize512To1023		= 0x15,	// # frames 512-1023
+	RxSize1024To1518	= 0x16,	// # frames 1024-1518
+	RxSize1519ToMax		= 0x17,	// # frames 1519-max

-#define REG_STAT_STICKY1G(pn)	CRA(0x4,pn,0x3e)	/* tri-speed sticky bits */
-#define REG_STAT_STICKY10G	CRA(0x4,0xa,0x3e)	/* 10GbE sticky bits */
-#define REG_STAT_INIT(pn)	CRA(0x4,pn,0x3f)	/* Clear all statistics */
+	TxOutBytes		= 0x18,	// # octets tx
+	TxPause			= 0x19,	// # pause frames sent
+	TxOkBytes		= 0x1a, // # octets tx OK
+	TxUnicast		= 0x1b,	// # frames unicast
+	TxMulticast		= 0x1c,	// # frames multicast
+	TxBroadcast		= 0x1d,	// # frames broadcast
+	TxMultipleColl		= 0x1e,	// # frames tx after multiple collisions
+	TxLateColl		= 0x1f,	// # late collisions detected
+	TxXcoll			= 0x20,	// # frames lost, excessive collisions
+	TxDefer			= 0x21,	// # frames deferred on first tx attempt
+	TxXdefer		= 0x22,	// # frames excessively deferred
+	TxCsense		= 0x23,	// carrier sense errors at frame end
+	TxSize64		= 0x24,	// # frames 64 octets long
+	TxSize65To127		= 0x25,	// # frames 65-127 octets
+	TxSize128To255		= 0x26,	// # frames 128-255
+	TxSize256To511		= 0x27,	// # frames 256-511
+	TxSize512To1023		= 0x28,	// # frames 512-1023
+	TxSize1024To1518	= 0x29,	// # frames 1024-1518
+	TxSize1519ToMax		= 0x2a,	// # frames 1519-max
+	TxSingleColl		= 0x2b,	// # frames tx after single collision
+	TxBackoff2		= 0x2c,	// # frames tx ok after 2 backoffs/collisions
+	TxBackoff3		= 0x2d,	//   after 3 backoffs/collisions
+	TxBackoff4		= 0x2e,	//   after 4
+	TxBackoff5		= 0x2f,	//   after 5
+	TxBackoff6		= 0x30,	//   after 6
+	TxBackoff7		= 0x31,	//   after 7
+	TxBackoff8		= 0x32,	//   after 8
+	TxBackoff9		= 0x33,	//   after 9
+	TxBackoff10		= 0x34,	//   after 10
+	TxBackoff11		= 0x35,	//   after 11
+	TxBackoff12		= 0x36,	//   after 12
+	TxBackoff13		= 0x37,	//   after 13
+	TxBackoff14		= 0x38,	//   after 14
+	TxBackoff15		= 0x39,	//   after 15
+	TxUnderrun		= 0x3a,	// # frames dropped from underrun
+	// Hole. See REG_RX_XGMII_PROT_ERR below.
+	RxIpgShrink		= 0x3c,	// # of IPG shrinks detected
+	// Duplicate. See REG_STAT_STICKY10G below.
+	StatSticky1G		= 0x3e,	// tri-speed sticky bits
+	StatInit		= 0x3f	// Clear all statistics
+};
+
+#define REG_RX_XGMII_PROT_ERR	CRA(0x4,0xa,0x3b)		/* # protocol errors detected on XGMII interface */
+#define REG_STAT_STICKY10G	CRA(0x4,0xa,StatSticky1G)	/* 10GbE sticky bits */
+
+#define REG_RX_OK_BYTES(pn)	CRA(0x4,pn,RxOkBytes)
+#define REG_RX_BAD_BYTES(pn)	CRA(0x4,pn,RxBadBytes)
+#define REG_TX_OK_BYTES(pn)	CRA(0x4,pn,TxOkBytes)

 /* MII-Management Block registers */
 /* These are for MII-M interface 0, which is the bidirectional LVTTL one.  If
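
The vsc7326_reg.h rewrite above folds dozens of per-counter #defines into one enum of statistics register numbers combined with the existing CRA(block, port, reg) address macro. A stand-alone sketch of the idea; the bit packing in CRA_DEMO below is an assumption for illustration only, the authoritative packing is defined elsewhere in vsc7326_reg.h:

#include <stdio.h>

/* assumed packing: 4-bit block, 4-bit port, 8-bit register number */
#define CRA_DEMO(blk, pn, addr) \
	((((blk) & 0xf) << 12) | (((pn) & 0xf) << 8) | ((addr) & 0xff))

enum { DemoRxOkBytes = 0x04, DemoTxOkBytes = 0x1a };	/* mirrors the enum above */

int main(void)
{
	int port;

	/* one macro plus one enum replaces a #define per counter per port */
	for (port = 0; port < 2; port++)
		printf("port %d: RxOkBytes at 0x%04x, TxOkBytes at 0x%04x\n",
		       port,
		       CRA_DEMO(0x4, port, DemoRxOkBytes),
		       CRA_DEMO(0x4, port, DemoTxOkBytes));
	return 0;
}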

+ 20 - 21
drivers/net/chelsio/vsc8244.c

@@ -54,7 +54,7 @@ enum {
 };

 #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
-	 		   VSC_INTR_NEG_DONE)
+			   VSC_INTR_NEG_DONE)
 #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
 		   VSC_INTR_ENABLE)

@@ -94,19 +94,18 @@ static int vsc8244_intr_enable(struct cphy *cphy)
 {
 	simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);

-    /* Enable interrupts through Elmer */
+	/* Enable interrupts through Elmer */
 	if (t1_is_asic(cphy->adapter)) {
 		u32 elmer;

 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-                }
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}

-    return 0;
+	return 0;
 }

 static int vsc8244_intr_disable(struct cphy *cphy)
@@ -118,19 +117,18 @@ static int vsc8244_intr_disable(struct cphy *cphy)

 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer &= ~ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 		    elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
-                }
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}

-    return 0;
+	return 0;
 }

 static int vsc8244_intr_clear(struct cphy *cphy)
 {
 	u32 val;
-    u32 elmer;
+	u32 elmer;

 	/* Clear PHY interrupts by reading the register. */
 	simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val);
@@ -138,13 +136,12 @@ static int vsc8244_intr_clear(struct cphy *cphy)
 	if (t1_is_asic(cphy->adapter)) {
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-                }
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
 	}

-    return 0;
+	return 0;
 }

 /*
@@ -179,13 +176,13 @@ static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)

 int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
 {
-    int ret;
-    unsigned int val;
+	int ret;
+	unsigned int val;

-    ret = mdio_read(phy, mmd, reg, &val);
-    if (!ret)
-        ret = mdio_write(phy, mmd, reg, val | bits);
-    return ret;
+	ret = mdio_read(phy, mmd, reg, &val);
+	if (!ret)
+		ret = mdio_write(phy, mmd, reg, val | bits);
+	return ret;
 }

 static int vsc8244_autoneg_enable(struct cphy *cphy)
@@ -235,7 +232,7 @@ static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
 }

 static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
-				     int *speed, int *duplex, int *fc)
+				   int *speed, int *duplex, int *fc)
 {
 	unsigned int bmcr, status, lpa, adv;
 	int err, sp = -1, dplx = -1, pause = 0;
@@ -343,11 +340,13 @@ static struct cphy_ops vsc8244_ops = {
 	.get_link_status      = vsc8244_get_link_status
 };

-static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops)
+static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr,
+				       struct mdio_ops *mdio_ops)
 {
 	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

-	if (!cphy) return NULL;
+	if (!cphy)
+		return NULL;

 	cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);


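t1_mdio_set_bits() in the vsc8244.c hunk above is the classic read-modify-write over MDIO: fetch the register, OR in the requested bits, and write the result back only if the read succeeded. A self-contained sketch of the same shape, with a simulated PHY register file standing in for the driver's accessors:

#include <stdio.h>

static unsigned int fake_phy_regs[32];

static int mdio_read_demo(int reg, unsigned int *val)
{
	*val = fake_phy_regs[reg & 31];
	return 0;			/* 0 = success, as in the driver */
}

static int mdio_write_demo(int reg, unsigned int val)
{
	fake_phy_regs[reg & 31] = val;
	return 0;
}

static int set_bits_demo(int reg, unsigned int bits)
{
	unsigned int val;
	int ret = mdio_read_demo(reg, &val);

	if (!ret)			/* write back only if the read worked */
		ret = mdio_write_demo(reg, val | bits);
	return ret;
}

int main(void)
{
	set_bits_demo(0, 0x1 | 0x4);
	printf("reg0 = 0x%x\n", fake_phy_regs[0]);
	return 0;
}
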
+ 8 - 0
drivers/net/cxgb3/Makefile

@@ -0,0 +1,8 @@
+#
+# Chelsio T3 driver
+#
+
+obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
+
+cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
+	      xgmac.o sge.o l2t.o cxgb3_offload.o

+ 279 - 0
drivers/net/cxgb3/adapter.h

@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file should not be included directly.  Include common.h instead. */
+
+#ifndef __T3_ADAPTER_H__
+#define __T3_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include "t3cdev.h"
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+typedef irqreturn_t(*intr_handler_t) (int, void *);
+
+struct vlan_group;
+
+struct port_info {
+	struct vlan_group *vlan_grp;
+	const struct port_type_info *port_type;
+	u8 port_id;
+	u8 rx_csum_offload;
+	u8 nqsets;
+	u8 first_qset;
+	struct cphy phy;
+	struct cmac mac;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+	int activity;
+};
+
+enum {				/* adapter flags */
+	FULL_INIT_DONE = (1 << 0),
+	USING_MSI = (1 << 1),
+	USING_MSIX = (1 << 2),
+	QUEUES_BOUND = (1 << 3),
+};
+
+struct rx_desc;
+struct rx_sw_desc;
+
+struct sge_fl {			/* SGE per free-buffer list state */
+	unsigned int buf_size;	/* size of each Rx buffer */
+	unsigned int credits;	/* # of available Rx buffers */
+	unsigned int size;	/* capacity of free list */
+	unsigned int cidx;	/* consumer index */
+	unsigned int pidx;	/* producer index */
+	unsigned int gen;	/* free list generation */
+	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
+	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
+	dma_addr_t phys_addr;	/* physical address of HW ring start */
+	unsigned int cntxt_id;	/* SGE context id for the free list */
+	unsigned long empty;	/* # of times queue ran out of buffers */
+};
+
+/*
+ * Bundle size for grouping offload RX packets for delivery to the stack.
+ * Don't make this too big as we do prefetch on each packet in a bundle.
+ */
+# define RX_BUNDLE_SIZE 8
+
+struct rsp_desc;
+
+struct sge_rspq {		/* state for an SGE response queue */
+	unsigned int credits;	/* # of pending response credits */
+	unsigned int size;	/* capacity of response queue */
+	unsigned int cidx;	/* consumer index */
+	unsigned int gen;	/* current generation bit */
+	unsigned int polling;	/* is the queue serviced through NAPI? */
+	unsigned int holdoff_tmr;	/* interrupt holdoff timer in 100ns */
+	unsigned int next_holdoff;	/* holdoff time for next interrupt */
+	struct rsp_desc *desc;	/* address of HW response ring */
+	dma_addr_t phys_addr;	/* physical address of the ring */
+	unsigned int cntxt_id;	/* SGE context id for the response q */
+	spinlock_t lock;	/* guards response processing */
+	struct sk_buff *rx_head;	/* offload packet receive queue head */
+	struct sk_buff *rx_tail;	/* offload packet receive queue tail */
+
+	unsigned long offload_pkts;
+	unsigned long offload_bundles;
+	unsigned long eth_pkts;	/* # of ethernet packets */
+	unsigned long pure_rsps;	/* # of pure (non-data) responses */
+	unsigned long imm_data;	/* responses with immediate data */
+	unsigned long rx_drops;	/* # of packets dropped due to no mem */
+	unsigned long async_notif; /* # of asynchronous notification events */
+	unsigned long empty;	/* # of times queue ran out of credits */
+	unsigned long nomem;	/* # of responses deferred due to no mem */
+	unsigned long unhandled_irqs;	/* # of spurious intrs */
+};
+
+struct tx_desc;
+struct tx_sw_desc;
+
+struct sge_txq {		/* state for an SGE Tx queue */
+	unsigned long flags;	/* HW DMA fetch status */
+	unsigned int in_use;	/* # of in-use Tx descriptors */
+	unsigned int size;	/* # of descriptors */
+	unsigned int processed;	/* total # of descs HW has processed */
+	unsigned int cleaned;	/* total # of descs SW has reclaimed */
+	unsigned int stop_thres;	/* SW TX queue suspend threshold */
+	unsigned int cidx;	/* consumer index */
+	unsigned int pidx;	/* producer index */
+	unsigned int gen;	/* current value of generation bit */
+	unsigned int unacked;	/* Tx descriptors used since last COMPL */
+	struct tx_desc *desc;	/* address of HW Tx descriptor ring */
+	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
+	spinlock_t lock;	/* guards enqueueing of new packets */
+	unsigned int token;	/* WR token */
+	dma_addr_t phys_addr;	/* physical address of the ring */
+	struct sk_buff_head sendq;	/* List of backpressured offload packets */
+	struct tasklet_struct qresume_tsk;	/* restarts the queue */
+	unsigned int cntxt_id;	/* SGE context id for the Tx q */
+	unsigned long stops;	/* # of times q has been stopped */
+	unsigned long restarts;	/* # of queue restarts */
+};
+
+enum {				/* per port SGE statistics */
+	SGE_PSTAT_TSO,		/* # of TSO requests */
+	SGE_PSTAT_RX_CSUM_GOOD,	/* # of successful RX csum offloads */
+	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
+	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
+	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
+
+	SGE_PSTAT_MAX		/* must be last */
+};
+
+struct sge_qset {		/* an SGE queue set */
+	struct sge_rspq rspq;
+	struct sge_fl fl[SGE_RXQ_PER_SET];
+	struct sge_txq txq[SGE_TXQ_PER_SET];
+	struct net_device *netdev;	/* associated net device */
+	unsigned long txq_stopped;	/* which Tx queues are stopped */
+	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+	unsigned long port_stats[SGE_PSTAT_MAX];
+} ____cacheline_aligned;
+
+struct sge {
+	struct sge_qset qs[SGE_QSETS];
+	spinlock_t reg_lock;	/* guards non-atomic SGE registers (eg context) */
+};
+
+struct adapter {
+	struct t3cdev tdev;
+	struct list_head adapter_list;
+	void __iomem *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	unsigned int mmio_len;
+
+	struct adapter_params params;
+	unsigned int slow_intr_mask;
+	unsigned long irq_stats[IRQ_NUM_STATS];
+
+	struct {
+		unsigned short vec;
+		char desc[22];
+	} msix_info[SGE_QSETS + 1];
+
+	/* T3 modules */
+	struct sge sge;
+	struct mc7 pmrx;
+	struct mc7 pmtx;
+	struct mc7 cm;
+	struct mc5 mc5;
+
+	struct net_device *port[MAX_NPORTS];
+	unsigned int check_task_cnt;
+	struct delayed_work adap_check_task;
+	struct work_struct ext_intr_handler_task;
+
+	/*
+	 * Dummy netdevices are needed when using multiple receive queues with
+	 * NAPI as each netdevice can service only one queue.
+	 */
+	struct net_device *dummy_netdev[SGE_QSETS - 1];
+
+	struct dentry *debugfs_root;
+
+	struct mutex mdio_lock;
+	spinlock_t stats_lock;
+	spinlock_t work_lock;
+};
+
+static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+	u32 val = readl(adapter->regs + reg_addr);
+
+	CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
+	return val;
+}
+
+static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+	CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
+	writel(val, adapter->regs + reg_addr);
+}
+
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+	return netdev_priv(adap->port[idx]);
+}
+
+/*
+ * We use the spare atalk_ptr to map a net device to its SGE queue set.
+ * This is a macro so it can be used as l-value.
+ */
+#define dev2qset(netdev) ((netdev)->atalk_ptr)
+
+#define OFFLOAD_DEVMAP_BIT 15
+
+#define tdev2adap(d) container_of(d, struct adapter, tdev)
+
+static inline int offload_running(struct adapter *adapter)
+{
+	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
+
+void t3_os_ext_intr_handler(struct adapter *adapter);
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+			int speed, int duplex, int fc);
+
+void t3_sge_start(struct adapter *adap);
+void t3_sge_stop(struct adapter *adap);
+void t3_free_sge_resources(struct adapter *adap);
+void t3_sge_err_intr_handler(struct adapter *adapter);
+intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
+int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+		      int irq_vec_idx, const struct qset_params *p,
+		      int ntxq, struct net_device *netdev);
+int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
+		unsigned char *data);
+irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+
+#endif				/* __T3_ADAPTER_H__ */
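
t3_read_reg()/t3_write_reg() above wrap every MMIO access so a CH_DBG trace can be attached to each read and write, one choke point instead of bare readl()/writel() calls scattered through the driver. A host-testable sketch of that shape, with a plain array standing in for the ioremapped BAR:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bar[0x100];	/* stand-in for adapter->regs */
static int mmio_debug = 1;		/* stand-in for the MMIO msg_enable bit */

static inline uint32_t demo_read_reg(uint32_t reg)
{
	uint32_t val = fake_bar[reg / 4];

	if (mmio_debug)
		printf("read register 0x%x value 0x%x\n", reg, val);
	return val;
}

static inline void demo_write_reg(uint32_t reg, uint32_t val)
{
	if (mmio_debug)
		printf("setting register 0x%x to 0x%x\n", reg, val);
	fake_bar[reg / 4] = val;
}

int main(void)
{
	demo_write_reg(0x40, 0xdeadbeef);
	return demo_read_reg(0x40) == 0xdeadbeef ? 0 : 1;
}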

+ 251 - 0
drivers/net/cxgb3/ael1002.c

@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+	AEL100X_TX_DISABLE = 9,
+	AEL100X_TX_CONFIG1 = 0xc002,
+	AEL1002_PWR_DOWN_HI = 0xc011,
+	AEL1002_PWR_DOWN_LO = 0xc012,
+	AEL1002_XFI_EQL = 0xc015,
+	AEL1002_LB_EN = 0xc017,
+
+	LASI_CTRL = 0x9002,
+	LASI_STAT = 0x9005
+};
+
+static void ael100x_txon(struct cphy *phy)
+{
+	int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+
+	msleep(100);
+	t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
+	msleep(30);
+}
+
+static int ael1002_power_down(struct cphy *phy, int enable)
+{
+	int err;
+
+	err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
+	if (!err)
+		err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
+					  BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+	return err;
+}
+
+static int ael1002_reset(struct cphy *phy, int wait)
+{
+	int err;
+
+	if ((err = ael1002_power_down(phy, 0)) ||
+	    (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
+	    (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
+	    (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
+	    (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
+	    (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
+				       0, 1 << 5)))
+		return err;
+	return 0;
+}
+
+static int ael1002_intr_noop(struct cphy *phy)
+{
+	return 0;
+}
+
+static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
+				   int *speed, int *duplex, int *fc)
+{
+	if (link_ok) {
+		unsigned int status;
+		int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
+
+		/*
+		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+		 * once more to get the current link state.
+		 */
+		if (!err && !(status & BMSR_LSTATUS))
+			err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
+					&status);
+		if (err)
+			return err;
+		*link_ok = !!(status & BMSR_LSTATUS);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static struct cphy_ops ael1002_ops = {
+	.reset = ael1002_reset,
+	.intr_enable = ael1002_intr_noop,
+	.intr_disable = ael1002_intr_noop,
+	.intr_clear = ael1002_intr_noop,
+	.intr_handler = ael1002_intr_noop,
+	.get_link_status = ael100x_get_link_status,
+	.power_down = ael1002_power_down,
+};
+
+void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
+	ael100x_txon(phy);
+}
+
+static int ael1006_reset(struct cphy *phy, int wait)
+{
+	return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
+}
+
+static int ael1006_intr_enable(struct cphy *phy)
+{
+	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
+}
+
+static int ael1006_intr_disable(struct cphy *phy)
+{
+	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
+}
+
+static int ael1006_intr_clear(struct cphy *phy)
+{
+	u32 val;
+
+	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
+}
+
+static int ael1006_intr_handler(struct cphy *phy)
+{
+	unsigned int status;
+	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
+
+	if (err)
+		return err;
+	return (status & 1) ? cphy_cause_link_change : 0;
+}
+
+static int ael1006_power_down(struct cphy *phy, int enable)
+{
+	return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
+				   BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+}
+
+static struct cphy_ops ael1006_ops = {
+	.reset = ael1006_reset,
+	.intr_enable = ael1006_intr_enable,
+	.intr_disable = ael1006_intr_disable,
+	.intr_clear = ael1006_intr_clear,
+	.intr_handler = ael1006_intr_handler,
+	.get_link_status = ael100x_get_link_status,
+	.power_down = ael1006_power_down,
+};
+
+void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
+	ael100x_txon(phy);
+}
+
+static struct cphy_ops qt2045_ops = {
+	.reset = ael1006_reset,
+	.intr_enable = ael1006_intr_enable,
+	.intr_disable = ael1006_intr_disable,
+	.intr_clear = ael1006_intr_clear,
+	.intr_handler = ael1006_intr_handler,
+	.get_link_status = ael100x_get_link_status,
+	.power_down = ael1006_power_down,
+};
+
+void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	unsigned int stat;
+
+	cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
+
+	/*
+	 * Some cards where the PHY is supposed to be at address 0 actually
+	 * have it at 1.
+	 */
+	if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
+	    stat == 0xffff)
+		phy->addr = 1;
+}
+
+static int xaui_direct_reset(struct cphy *phy, int wait)
+{
+	return 0;
+}
+
+static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
+				       int *speed, int *duplex, int *fc)
+{
+	if (link_ok) {
+		unsigned int status;
+
+		status = t3_read_reg(phy->adapter,
+				     XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
+		*link_ok = !(status & F_LOWSIG0);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static int xaui_direct_power_down(struct cphy *phy, int enable)
+{
+	return 0;
+}
+
+static struct cphy_ops xaui_direct_ops = {
+	.reset = xaui_direct_reset,
+	.intr_enable = ael1002_intr_noop,
+	.intr_disable = ael1002_intr_noop,
+	.intr_clear = ael1002_intr_noop,
+	.intr_handler = ael1002_intr_noop,
+	.get_link_status = xaui_direct_get_link_status,
+	.power_down = xaui_direct_power_down,
+};
+
+void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+			     int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
+}
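
ael100x_get_link_status() above deals with BMSR_LSTATUS being latch-low: after a link drop the bit stays 0 until it is read, so a 0 on the first read may be a stale latched value and the register is read once more for the current state. A stand-alone sketch of that double-read, with a simulated latching PHY:

#include <stdio.h>

#define BMSR_LSTATUS 0x0004

static unsigned int latched_bmsr;		/* pretend the link bounced... */
static unsigned int live_bmsr = BMSR_LSTATUS;	/* ...but is up right now */

static int read_bmsr(unsigned int *status)
{
	*status = latched_bmsr;
	latched_bmsr = live_bmsr;	/* reading refreshes the latch */
	return 0;
}

int main(void)
{
	unsigned int status;
	int link_ok;

	if (read_bmsr(&status))
		return 1;
	if (!(status & BMSR_LSTATUS))	/* possibly stale 0: read again */
		read_bmsr(&status);
	link_ok = !!(status & BMSR_LSTATUS);
	printf("link_ok = %d\n", link_ok);
	return 0;
}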

+ 729 - 0
drivers/net/cxgb3/common.h

@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include "version.h"
+
+#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) \
+	dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+	if ((adapter)->msg_enable & NETIF_MSG_##category) \
+		dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
+			   ## __VA_ARGS__); \
+} while (0)
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(adapter, category, fmt, ...)
+#endif
+
+/* Additional NETIF_MSG_* categories */
+#define NETIF_MSG_MMIO 0x8000000
+
+struct t3_rx_mode {
+	struct net_device *dev;
+	struct dev_mc_list *mclist;
+	unsigned int idx;
+};
+
+static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
+				struct dev_mc_list *mclist)
+{
+	p->dev = dev;
+	p->mclist = mclist;
+	p->idx = 0;
+}
+
+static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
+{
+	u8 *addr = NULL;
+
+	if (rm->mclist && rm->idx < rm->dev->mc_count) {
+		addr = rm->mclist->dmi_addr;
+		rm->mclist = rm->mclist->next;
+		rm->idx++;
+	}
+	return addr;
+}
+
+enum {
+	MAX_NPORTS = 2,		/* max # of ports */
+	MAX_FRAME_SIZE = 10240,	/* max MAC frame size, including header + FCS */
+	EEPROMSIZE = 8192,	/* Serial EEPROM size */
+	RSS_TABLE_SIZE = 64,	/* size of RSS lookup and mapping tables */
+	TCB_SIZE = 128,		/* TCB size */
+	NMTUS = 16,		/* size of MTU table */
+	NCCTRL_WIN = 32,	/* # of congestion control windows */
+};
+
+#define MAX_RX_COALESCING_LEN 16224U
+
+enum {
+	PAUSE_RX = 1 << 0,
+	PAUSE_TX = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+enum {
+	SUPPORTED_OFFLOAD = 1 << 24,
+	SUPPORTED_IRQ = 1 << 25
+};
+
+enum {				/* adapter interrupt-maintained statistics */
+	STAT_ULP_CH0_PBL_OOB,
+	STAT_ULP_CH1_PBL_OOB,
+	STAT_PCI_CORR_ECC,
+
+	IRQ_NUM_STATS		/* keep last */
+};
+
+enum {
+	SGE_QSETS = 8,		/* # of SGE Tx/Rx/RspQ sets */
+	SGE_RXQ_PER_SET = 2,	/* # of Rx queues per set */
+	SGE_TXQ_PER_SET = 3	/* # of Tx queues per set */
+};
+
+enum sge_context_type {		/* SGE egress context types */
+	SGE_CNTXT_RDMA = 0,
+	SGE_CNTXT_ETH = 2,
+	SGE_CNTXT_OFLD = 4,
+	SGE_CNTXT_CTRL = 5
+};
+
+enum {
+	AN_PKT_SIZE = 32,	/* async notification packet size */
+	IMMED_PKT_SIZE = 48	/* packet size for immediate data */
+};
+
+struct sg_ent {			/* SGE scatter/gather entry */
+	u32 len[2];
+	u64 addr[2];
+};
+
+#ifndef SGE_NUM_GENBITS
+/* Must be 1 or 2 */
+# define SGE_NUM_GENBITS 2
+#endif
+
+#define TX_DESC_FLITS 16U
+#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
+
+struct cphy;
+struct adapter;
+
+struct mdio_ops {
+	int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
+		    int reg_addr, unsigned int *val);
+	int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
+		     int reg_addr, unsigned int val);
+};
+
+struct adapter_info {
+	unsigned char nports;	/* # of ports */
+	unsigned char phy_base_addr;	/* MDIO PHY base address */
+	unsigned char mdien;
+	unsigned char mdiinv;
+	unsigned int gpio_out;	/* GPIO output settings */
+	unsigned int gpio_intr;	/* GPIO IRQ enable mask */
+	unsigned long caps;	/* adapter capabilities */
+	const struct mdio_ops *mdio_ops;	/* MDIO operations */
+	const char *desc;	/* product description */
+};
+
+struct port_type_info {
+	void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *ops);
+	unsigned int caps;
+	const char *desc;
+};
+
+struct mc5_stats {
+	unsigned long parity_err;
+	unsigned long active_rgn_full;
+	unsigned long nfa_srch_err;
+	unsigned long unknown_cmd;
+	unsigned long reqq_parity_err;
+	unsigned long dispq_parity_err;
+	unsigned long del_act_empty;
+};
+
+struct mc7_stats {
+	unsigned long corr_err;
+	unsigned long uncorr_err;
+	unsigned long parity_err;
+	unsigned long addr_err;
+};
+
+struct mac_stats {
+	u64 tx_octets;		/* total # of octets in good frames */
+	u64 tx_octets_bad;	/* total # of octets in error frames */
+	u64 tx_frames;		/* all good frames */
+	u64 tx_mcast_frames;	/* good multicast frames */
+	u64 tx_bcast_frames;	/* good broadcast frames */
+	u64 tx_pause;		/* # of transmitted pause frames */
+	u64 tx_deferred;	/* frames with deferred transmissions */
+	u64 tx_late_collisions;	/* # of late collisions */
+	u64 tx_total_collisions;	/* # of total collisions */
+	u64 tx_excess_collisions;	/* frame errors from excessive collissions */
+	u64 tx_underrun;	/* # of Tx FIFO underruns */
+	u64 tx_len_errs;	/* # of Tx length errors */
+	u64 tx_mac_internal_errs;	/* # of internal MAC errors on Tx */
+	u64 tx_excess_deferral;	/* # of frames with excessive deferral */
+	u64 tx_fcs_errs;	/* # of frames with bad FCS */
+
+	u64 tx_frames_64;	/* # of Tx frames in a particular range */
+	u64 tx_frames_65_127;
+	u64 tx_frames_128_255;
+	u64 tx_frames_256_511;
+	u64 tx_frames_512_1023;
+	u64 tx_frames_1024_1518;
+	u64 tx_frames_1519_max;
+
+	u64 rx_octets;		/* total # of octets in good frames */
+	u64 rx_octets_bad;	/* total # of octets in error frames */
+	u64 rx_frames;		/* all good frames */
+	u64 rx_mcast_frames;	/* good multicast frames */
+	u64 rx_bcast_frames;	/* good broadcast frames */
+	u64 rx_pause;		/* # of received pause frames */
+	u64 rx_fcs_errs;	/* # of received frames with bad FCS */
+	u64 rx_align_errs;	/* alignment errors */
+	u64 rx_symbol_errs;	/* symbol errors */
+	u64 rx_data_errs;	/* data errors */
+	u64 rx_sequence_errs;	/* sequence errors */
+	u64 rx_runt;		/* # of runt frames */
+	u64 rx_jabber;		/* # of jabber frames */
+	u64 rx_short;		/* # of short frames */
+	u64 rx_too_long;	/* # of oversized frames */
+	u64 rx_mac_internal_errs;	/* # of internal MAC errors on Rx */
+
+	u64 rx_frames_64;	/* # of Rx frames in a particular range */
+	u64 rx_frames_65_127;
+	u64 rx_frames_128_255;
+	u64 rx_frames_256_511;
+	u64 rx_frames_512_1023;
+	u64 rx_frames_1024_1518;
+	u64 rx_frames_1519_max;
+
+	u64 rx_cong_drops;	/* # of Rx drops due to SGE congestion */
+
+	unsigned long tx_fifo_parity_err;
+	unsigned long rx_fifo_parity_err;
+	unsigned long tx_fifo_urun;
+	unsigned long rx_fifo_ovfl;
+	unsigned long serdes_signal_loss;
+	unsigned long xaui_pcs_ctc_err;
+	unsigned long xaui_pcs_align_change;
+};
+
+struct tp_mib_stats {
+	u32 ipInReceive_hi;
+	u32 ipInReceive_lo;
+	u32 ipInHdrErrors_hi;
+	u32 ipInHdrErrors_lo;
+	u32 ipInAddrErrors_hi;
+	u32 ipInAddrErrors_lo;
+	u32 ipInUnknownProtos_hi;
+	u32 ipInUnknownProtos_lo;
+	u32 ipInDiscards_hi;
+	u32 ipInDiscards_lo;
+	u32 ipInDelivers_hi;
+	u32 ipInDelivers_lo;
+	u32 ipOutRequests_hi;
+	u32 ipOutRequests_lo;
+	u32 ipOutDiscards_hi;
+	u32 ipOutDiscards_lo;
+	u32 ipOutNoRoutes_hi;
+	u32 ipOutNoRoutes_lo;
+	u32 ipReasmTimeout;
+	u32 ipReasmReqds;
+	u32 ipReasmOKs;
+	u32 ipReasmFails;
+
+	u32 reserved[8];
+
+	u32 tcpActiveOpens;
+	u32 tcpPassiveOpens;
+	u32 tcpAttemptFails;
+	u32 tcpEstabResets;
+	u32 tcpOutRsts;
+	u32 tcpCurrEstab;
+	u32 tcpInSegs_hi;
+	u32 tcpInSegs_lo;
+	u32 tcpOutSegs_hi;
+	u32 tcpOutSegs_lo;
+	u32 tcpRetransSeg_hi;
+	u32 tcpRetransSeg_lo;
+	u32 tcpInErrs_hi;
+	u32 tcpInErrs_lo;
+	u32 tcpRtoMin;
+	u32 tcpRtoMax;
+};
+
+struct tp_params {
+	unsigned int nchan;	/* # of channels */
+	unsigned int pmrx_size;	/* total PMRX capacity */
+	unsigned int pmtx_size;	/* total PMTX capacity */
+	unsigned int cm_size;	/* total CM capacity */
+	unsigned int chan_rx_size;	/* per channel Rx size */
+	unsigned int chan_tx_size;	/* per channel Tx size */
+	unsigned int rx_pg_size;	/* Rx page size */
+	unsigned int tx_pg_size;	/* Tx page size */
+	unsigned int rx_num_pgs;	/* # of Rx pages */
+	unsigned int tx_num_pgs;	/* # of Tx pages */
+	unsigned int ntimer_qs;	/* # of timer queues */
+};
+
+struct qset_params {		/* SGE queue set parameters */
+	unsigned int polling;	/* polling/interrupt service for rspq */
+	unsigned int coalesce_usecs;	/* irq coalescing timer */
+	unsigned int rspq_size;	/* # of entries in response queue */
+	unsigned int fl_size;	/* # of entries in regular free list */
+	unsigned int jumbo_size;	/* # of entries in jumbo free list */
+	unsigned int txq_size[SGE_TXQ_PER_SET];	/* Tx queue sizes */
+	unsigned int cong_thres;	/* FL congestion threshold */
+};
+
+struct sge_params {
+	unsigned int max_pkt_size;	/* max offload pkt size */
+	struct qset_params qset[SGE_QSETS];
+};
+
+struct mc5_params {
+	unsigned int mode;	/* selects MC5 width */
+	unsigned int nservers;	/* size of server region */
+	unsigned int nfilters;	/* size of filter region */
+	unsigned int nroutes;	/* size of routing region */
+};
+
+/* Default MC5 region sizes */
+enum {
+	DEFAULT_NSERVERS = 512,
+	DEFAULT_NFILTERS = 128
+};
+
+/* MC5 modes, these must be non-0 */
+enum {
+	MC5_MODE_144_BIT = 1,
+	MC5_MODE_72_BIT = 2
+};
+
+struct vpd_params {
+	unsigned int cclk;
+	unsigned int mclk;
+	unsigned int uclk;
+	unsigned int mdc;
+	unsigned int mem_timing;
+	u8 eth_base[6];
+	u8 port_type[MAX_NPORTS];
+	unsigned short xauicfg[2];
+};
+
+struct pci_params {
+	unsigned int vpd_cap_addr;
+	unsigned int pcie_cap_addr;
+	unsigned short speed;
+	unsigned char width;
+	unsigned char variant;
+};
+
+enum {
+	PCI_VARIANT_PCI,
+	PCI_VARIANT_PCIX_MODE1_PARITY,
+	PCI_VARIANT_PCIX_MODE1_ECC,
+	PCI_VARIANT_PCIX_266_MODE2,
+	PCI_VARIANT_PCIE
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct mc5_params mc5;
+	struct tp_params tp;
+	struct vpd_params vpd;
+	struct pci_params pci;
+
+	const struct adapter_info *info;
+
+	unsigned short mtus[NMTUS];
+	unsigned short a_wnd[NCCTRL_WIN];
+	unsigned short b_wnd[NCCTRL_WIN];
+
+	unsigned int nports;	/* # of ethernet ports */
+	unsigned int stats_update_period;	/* MAC stats accumulation period */
+	unsigned int linkpoll_period;	/* link poll period in 0.1s */
+	unsigned int rev;	/* chip revision */
+};
+
+struct trace_params {
+	u32 sip;
+	u32 sip_mask;
+	u32 dip;
+	u32 dip_mask;
+	u16 sport;
+	u16 sport_mask;
+	u16 dport;
+	u16 dport_mask;
+	u32 vlan:12;
+	u32 vlan_mask:12;
+	u32 intf:4;
+	u32 intf_mask:4;
+	u8 proto;
+	u8 proto_mask;
+};
+
+struct link_config {
+	unsigned int supported;	/* link capabilities */
+	unsigned int advertising;	/* advertised capabilities */
+	unsigned short requested_speed;	/* speed user has requested */
+	unsigned short speed;	/* actual link speed */
+	unsigned char requested_duplex;	/* duplex user has requested */
+	unsigned char duplex;	/* actual link duplex */
+	unsigned char requested_fc;	/* flow control user has requested */
+	unsigned char fc;	/* actual link flow control */
+	unsigned char autoneg;	/* autonegotiating? */
+	unsigned int link_ok;	/* link up? */
+};
+
+#define SPEED_INVALID   0xffff
+#define DUPLEX_INVALID  0xff
+
+struct mc5 {
+	struct adapter *adapter;
+	unsigned int tcam_size;
+	unsigned char part_type;
+	unsigned char parity_enabled;
+	unsigned char mode;
+	struct mc5_stats stats;
+};
+
+static inline unsigned int t3_mc5_size(const struct mc5 *p)
+{
+	return p->tcam_size;
+}
+
+struct mc7 {
+	struct adapter *adapter;	/* backpointer to adapter */
+	unsigned int size;	/* memory size in bytes */
+	unsigned int width;	/* MC7 interface width */
+	unsigned int offset;	/* register address offset for MC7 instance */
+	const char *name;	/* name of MC7 instance */
+	struct mc7_stats stats;	/* MC7 statistics */
+};
+
+static inline unsigned int t3_mc7_size(const struct mc7 *p)
+{
+	return p->size;
+}
+
+struct cmac {
+	struct adapter *adapter;
+	unsigned int offset;
+	unsigned int nucast;	/* # of address filters for unicast MACs */
+	struct mac_stats stats;
+};
+
+enum {
+	MAC_DIRECTION_RX = 1,
+	MAC_DIRECTION_TX = 2,
+	MAC_RXFIFO_SIZE = 32768
+};
+
+/* IEEE 802.3ae specified MDIO devices */
+enum {
+	MDIO_DEV_PMA_PMD = 1,
+	MDIO_DEV_WIS = 2,
+	MDIO_DEV_PCS = 3,
+	MDIO_DEV_XGXS = 4
+};
+
+/* PHY loopback direction */
+enum {
+	PHY_LOOPBACK_TX = 1,
+	PHY_LOOPBACK_RX = 2
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 1,
+	cphy_cause_fifo_error = 2
+};
+
+/* PHY operations */
+struct cphy_ops {
+	void (*destroy)(struct cphy *phy);
+	int (*reset)(struct cphy *phy, int wait);
+
+	int (*intr_enable)(struct cphy *phy);
+	int (*intr_disable)(struct cphy *phy);
+	int (*intr_clear)(struct cphy *phy);
+	int (*intr_handler)(struct cphy *phy);
+
+	int (*autoneg_enable)(struct cphy *phy);
+	int (*autoneg_restart)(struct cphy *phy);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+	int (*power_down)(struct cphy *phy, int enable);
+};
+
+/* A PHY instance */
+struct cphy {
+	int addr;		/* PHY address */
+	struct adapter *adapter;	/* associated adapter */
+	unsigned long fifo_errors;	/* FIFO over/under-flows */
+	const struct cphy_ops *ops;	/* PHY operations */
+	int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int *val);
+	int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
+			  int reg_addr, unsigned int val);
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *phy, int mmd, int reg,
+			    unsigned int *valp)
+{
+	return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *phy, int mmd, int reg,
+			     unsigned int val)
+{
+	return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     const struct mdio_ops *mdio_ops)
+{
+	phy->adapter = adapter;
+	phy->addr = phy_addr;
+	phy->ops = phy_ops;
+	if (mdio_ops) {
+		phy->mdio_read = mdio_ops->read;
+		phy->mdio_write = mdio_ops->write;
+	}
+}
+
+/* Accumulate MAC statistics every 180 seconds.  For 1G we multiply by 10. */
+#define MAC_STATS_ACCUM_SECS 180
+
+#define XGM_REG(reg_addr, idx) \
+	((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
+
+struct addr_val_pair {
+	unsigned int reg_addr;
+	unsigned int val;
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define adapter_info(adap) ((adap)->params.info)
+
+static inline int uses_xaui(const struct adapter *adap)
+{
+	return adapter_info(adap)->caps & SUPPORTED_AUI;
+}
+
+static inline int is_10G(const struct adapter *adap)
+{
+	return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+	return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+	return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int is_pcie(const struct adapter *adap)
+{
+	return adap->params.pci.variant == PCI_VARIANT_PCIE;
+}
+
+void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+		      u32 val);
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+		   int n, unsigned int offset);
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			int polarity, int attempts, int delay, u32 *valp);
+static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+			unsigned int set);
+int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
+
+void t3_intr_enable(struct adapter *adapter);
+void t3_intr_disable(struct adapter *adapter);
+void t3_intr_clear(struct adapter *adapter);
+void t3_port_intr_enable(struct adapter *adapter, int idx);
+void t3_port_intr_disable(struct adapter *adapter, int idx);
+void t3_port_intr_clear(struct adapter *adapter, int idx);
+int t3_slow_intr_handler(struct adapter *adapter);
+int t3_phy_intr_handler(struct adapter *adapter);
+
+void t3_link_changed(struct adapter *adapter, int port_id);
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
+int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented);
+int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
+int t3_get_fw_version(struct adapter *adapter, u32 *vers);
+int t3_check_fw_version(struct adapter *adapter);
+int t3_init_hw(struct adapter *adapter, u32 fw_params);
+void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
+void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+		    int reset);
+void t3_led_ready(struct adapter *adapter);
+void t3_fatal_err(struct adapter *adapter);
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+		   const u8 * cpus, const u16 *rspq);
+int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
+int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+			unsigned int n, unsigned int *valp);
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+		   u64 *buf);
+
+int t3_mac_reset(struct cmac *mac);
+void t3b_pcs_reset(struct cmac *mac);
+int t3_mac_enable(struct cmac *mac, int which);
+int t3_mac_disable(struct cmac *mac, int which);
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
+int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_num_ucast(struct cmac *mac, int n);
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+		unsigned int nroutes);
+void t3_mc5_intr_handler(struct mc5 *mc5);
+int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
+		      u32 *buf);
+
+int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
+void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
+void t3_tp_set_offload_mode(struct adapter *adap, int enable);
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+		  unsigned short alpha[NCCTRL_WIN],
+		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
+void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
+void t3_get_cong_cntl_tab(struct adapter *adap,
+			  unsigned short incr[NMTUS][NCCTRL_WIN]);
+void t3_config_trace_filter(struct adapter *adapter,
+			    const struct trace_params *tp, int filter_index,
+			    int invert, int enable);
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
+
+void t3_sge_prep(struct adapter *adap, struct sge_params *p);
+void t3_sge_init(struct adapter *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+		       enum sge_context_type type, int respq, u64 base_addr,
+		       unsigned int size, unsigned int token, int gen,
+		       unsigned int cidx);
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+			int gts_enable, u64 base_addr, unsigned int size,
+			unsigned int esize, unsigned int cong_thres, int gen,
+			unsigned int cidx);
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+			 int irq_vec_idx, u64 base_addr, unsigned int size,
+			 unsigned int fl_thres, int gen, unsigned int cidx);
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+			unsigned int size, int rspq, int ovfl_mode,
+			unsigned int credits, unsigned int credit_thres);
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+		      unsigned int credits);
+
+void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops);
+void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops);
+void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops);
+void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+			const struct mdio_ops *mdio_ops);
+void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+			     int phy_addr, const struct mdio_ops *mdio_ops);
+#endif				/* __CHELSIO_COMMON_H */

+ 164 - 0
drivers/net/cxgb3/cxgb3_ctl_defs.h

@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
+#define _CXGB3_OFFLOAD_CTL_DEFS_H
+
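+/*
+ * Opcodes for the control operations exported to the offload modules
+ * through the t3cdev ctl entry point.
+ */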
+enum {
+	GET_MAX_OUTSTANDING_WR,
+	GET_TX_MAX_CHUNK,
+	GET_TID_RANGE,
+	GET_STID_RANGE,
+	GET_RTBL_RANGE,
+	GET_L2T_CAPACITY,
+	GET_MTUS,
+	GET_WR_LEN,
+	GET_IFF_FROM_MAC,
+	GET_DDP_PARAMS,
+	GET_PORTS,
+
+	ULP_ISCSI_GET_PARAMS,
+	ULP_ISCSI_SET_PARAMS,
+
+	RDMA_GET_PARAMS,
+	RDMA_CQ_OP,
+	RDMA_CQ_SETUP,
+	RDMA_CQ_DISABLE,
+	RDMA_CTRL_QP_SETUP,
+	RDMA_GET_MEM,
+};
+
+/*
+ * Structure used to describe a TID range.  Valid TIDs are [base, base+num).
+ */
+struct tid_range {
+	unsigned int base;	/* first TID */
+	unsigned int num;	/* number of TIDs in range */
+};
+
+/*
+ * Structure used to request the size and contents of the MTU table.
+ */
+struct mtutab {
+	unsigned int size;	/* # of entries in the MTU table */
+	const unsigned short *mtus;	/* the MTU table values */
+};
+
+struct net_device;
+
+/*
+ * Structure used to request the adapter net_device owning a given MAC address.
+ */
+struct iff_mac {
+	struct net_device *dev;	/* the net_device */
+	const unsigned char *mac_addr;	/* MAC address to lookup */
+	u16 vlan_tag;
+};
+
+struct pci_dev;
+
+/*
+ * Structure used to request the TCP DDP parameters.
+ */
+struct ddp_params {
+	unsigned int llimit;	/* TDDP region start address */
+	unsigned int ulimit;	/* TDDP region end address */
+	unsigned int tag_mask;	/* TDDP tag mask */
+	struct pci_dev *pdev;
+};
+
+struct adap_ports {
+	unsigned int nports;	/* number of ports on this adapter */
+	struct net_device *lldevs[2];
+};
+
+/*
+ * Structure used to return information to the iscsi layer.
+ */
+struct ulp_iscsi_info {
+	unsigned int offset;
+	unsigned int llimit;
+	unsigned int ulimit;
+	unsigned int tagmask;
+	unsigned int pgsz3;
+	unsigned int pgsz2;
+	unsigned int pgsz1;
+	unsigned int pgsz0;
+	unsigned int max_rxsz;
+	unsigned int max_txsz;
+	struct pci_dev *pdev;
+};
+
+/*
+ * Structure used to return information to the RDMA layer.
+ */
+struct rdma_info {
+	unsigned int tpt_base;	/* TPT base address */
+	unsigned int tpt_top;	/* TPT last entry address */
+	unsigned int pbl_base;	/* PBL base address */
+	unsigned int pbl_top;	/* PBL last entry address */
+	unsigned int rqt_base;	/* RQT base address */
+	unsigned int rqt_top;	/* RQT last entry address */
+	unsigned int udbell_len;	/* user doorbell region length */
+	unsigned long udbell_physbase;	/* user doorbell physical start addr */
+	void __iomem *kdb_addr;	/* kernel doorbell register address */
+	struct pci_dev *pdev;	/* associated PCI device */
+};
+
+/*
+ * Structure used to request an operation on an RDMA completion queue.
+ */
+struct rdma_cq_op {
+	unsigned int id;
+	unsigned int op;
+	unsigned int credits;
+};
+
+/*
+ * Structure used to setup RDMA completion queues.
+ */
+struct rdma_cq_setup {
+	unsigned int id;
+	unsigned long long base_addr;
+	unsigned int size;
+	unsigned int credits;
+	unsigned int credit_thres;
+	unsigned int ovfl_mode;
+};
+
+/*
+ * Structure used to setup the RDMA control egress context.
+ */
+struct rdma_ctrlqp_setup {
+	unsigned long long base_addr;
+	unsigned int size;
+};
+#endif				/* _CXGB3_OFFLOAD_CTL_DEFS_H */

+ 99 - 0
drivers/net/cxgb3/cxgb3_defs.h

@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_DEFS_H
+#define _CHELSIO_DEFS_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "t3cdev.h"
+
+#include "cxgb3_offload.h"
+
+#define VALIDATE_TID 1
+
+void *cxgb_alloc_mem(unsigned long size);
+void cxgb_free_mem(void *addr);
+void cxgb_neigh_update(struct neighbour *neigh);
+void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+
+/*
+ * Map an ATID or STID to their entries in the corresponding TID tables.
+ */
+static inline union active_open_entry *atid2entry(const struct tid_info *t,
+						  unsigned int atid)
+{
+	return &t->atid_tab[atid - t->atid_base];
+}
+
+static inline union listen_entry *stid2entry(const struct tid_info *t,
+					     unsigned int stid)
+{
+	return &t->stid_tab[stid - t->stid_base];
+}
+
+/*
+ * Find the connection corresponding to a TID.
+ */
+static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
+					       unsigned int tid)
+{
+	return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+}
+
+/*
+ * Find the connection corresponding to a server TID.
+ */
+static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
+						unsigned int tid)
+{
+	if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
+		return NULL;
+	return &(stid2entry(t, tid)->t3c_tid);
+}
+
+/*
+ * Find the connection corresponding to an active-open TID.
+ */
+static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
+						unsigned int tid)
+{
+	if (tid < t->atid_base || tid >= t->atid_base + t->natids)
+		return NULL;
+	return &(atid2entry(t, tid)->t3c_tid);
+}
+
+int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
+int attach_t3cdev(struct t3cdev *dev);
+void detach_t3cdev(struct t3cdev *dev);
+#endif

+ 185 - 0
drivers/net/cxgb3/cxgb3_ioctl.h

@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHIOCTL_H__
+#define __CHIOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+	CHELSIO_SETREG = 1024,
+	CHELSIO_GETREG,
+	CHELSIO_SETTPI,
+	CHELSIO_GETTPI,
+	CHELSIO_GETMTUTAB,
+	CHELSIO_SETMTUTAB,
+	CHELSIO_GETMTU,
+	CHELSIO_SET_PM,
+	CHELSIO_GET_PM,
+	CHELSIO_GET_TCAM,
+	CHELSIO_SET_TCAM,
+	CHELSIO_GET_TCB,
+	CHELSIO_GET_MEM,
+	CHELSIO_LOAD_FW,
+	CHELSIO_GET_PROTO,
+	CHELSIO_SET_PROTO,
+	CHELSIO_SET_TRACE_FILTER,
+	CHELSIO_SET_QSET_PARAMS,
+	CHELSIO_GET_QSET_PARAMS,
+	CHELSIO_SET_QSET_NUM,
+	CHELSIO_GET_QSET_NUM,
+	CHELSIO_SET_PKTSCHED,
+};
+
+struct ch_reg {
+	uint32_t cmd;
+	uint32_t addr;
+	uint32_t val;
+};
+
+struct ch_cntxt {
+	uint32_t cmd;
+	uint32_t cntxt_type;
+	uint32_t cntxt_id;
+	uint32_t data[4];
+};
+
+/* context types */
+enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
+
+struct ch_desc {
+	uint32_t cmd;
+	uint32_t queue_num;
+	uint32_t idx;
+	uint32_t size;
+	uint8_t data[128];
+};
+
+struct ch_mem_range {
+	uint32_t cmd;
+	uint32_t mem_id;
+	uint32_t addr;
+	uint32_t len;
+	uint32_t version;
+	uint8_t buf[0];
+};
+
+struct ch_qset_params {
+	uint32_t cmd;
+	uint32_t qset_idx;
+	int32_t txq_size[3];
+	int32_t rspq_size;
+	int32_t fl_size[2];
+	int32_t intr_lat;
+	int32_t polling;
+	int32_t cong_thres;
+};
+
+struct ch_pktsched_params {
+	uint32_t cmd;
+	uint8_t sched;
+	uint8_t idx;
+	uint8_t min;
+	uint8_t max;
+	uint8_t binding;
+};
+
+#ifndef TCB_SIZE
+# define TCB_SIZE   128
+#endif
+
+/* TCB size in 32-bit words */
+#define TCB_WORDS (TCB_SIZE / 4)
+
+enum { MEM_CM, MEM_PMRX, MEM_PMTX };	/* ch_mem_range.mem_id values */
+
+struct ch_mtus {
+	uint32_t cmd;
+	uint32_t nmtus;
+	uint16_t mtus[NMTUS];
+};
+
+struct ch_pm {
+	uint32_t cmd;
+	uint32_t tx_pg_sz;
+	uint32_t tx_num_pg;
+	uint32_t rx_pg_sz;
+	uint32_t rx_num_pg;
+	uint32_t pm_total;
+};
+
+struct ch_tcam {
+	uint32_t cmd;
+	uint32_t tcam_size;
+	uint32_t nservers;
+	uint32_t nroutes;
+	uint32_t nfilters;
+};
+
+struct ch_tcb {
+	uint32_t cmd;
+	uint32_t tcb_index;
+	uint32_t tcb_data[TCB_WORDS];
+};
+
+struct ch_tcam_word {
+	uint32_t cmd;
+	uint32_t addr;
+	uint32_t buf[3];
+};
+
+struct ch_trace {
+	uint32_t cmd;
+	uint32_t sip;
+	uint32_t sip_mask;
+	uint32_t dip;
+	uint32_t dip_mask;
+	uint16_t sport;
+	uint16_t sport_mask;
+	uint16_t dport;
+	uint16_t dport_mask;
+	uint32_t vlan:12;
+	uint32_t vlan_mask:12;
+	uint32_t intf:4;
+	uint32_t intf_mask:4;
+	uint8_t proto;
+	uint8_t proto_mask;
+	uint8_t invert_match:1;
+	uint8_t config_tx:1;
+	uint8_t config_rx:1;
+	uint8_t trace_tx:1;
+	uint8_t trace_rx:1;
+};
+
+#define SIOCCHIOCTL SIOCDEVPRIVATE
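+
+/*
+ * These private ioctls are expected to be issued through the standard
+ * SIOCDEVPRIVATE path, with ifr_data pointing at one of the structures
+ * above; the first field is always the 32-bit cmd.  A minimal userspace
+ * sketch (sock_fd, "eth0", and the register offset are illustrative):
+ *
+ *	struct ch_reg op = { .cmd = CHELSIO_GETREG, .addr = 0x6c };
+ *	struct ifreq ifr;
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ifr.ifr_data = (void *)&op;
+ *	ioctl(sock_fd, SIOCCHIOCTL, &ifr);
+ */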
+
+#endif

+ 2515 - 0
drivers/net/cxgb3/cxgb3_main.c

@@ -0,0 +1,2515 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/rtnetlink.h>
+#include <asm/uaccess.h>
+
+#include "common.h"
+#include "cxgb3_ioctl.h"
+#include "regs.h"
+#include "cxgb3_offload.h"
+#include "version.h"
+
+#include "cxgb3_ctl_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+enum {
+	MAX_TXQ_ENTRIES = 16384,
+	MAX_CTRL_TXQ_ENTRIES = 1024,
+	MAX_RSPQ_ENTRIES = 16384,
+	MAX_RX_BUFFERS = 16384,
+	MAX_RX_JUMBO_BUFFERS = 16384,
+	MIN_TXQ_ENTRIES = 4,
+	MIN_CTRL_TXQ_ENTRIES = 4,
+	MIN_RSPQ_ENTRIES = 32,
+	MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+static const struct pci_device_id cxgb3_pci_tbl[] = {
+	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
+	CH_DEVICE(0x21, 1, 1),	/* T302E */
+	CH_DEVICE(0x22, 1, 2),	/* T310E */
+	CH_DEVICE(0x23, 1, 3),	/* T320X */
+	CH_DEVICE(0x24, 1, 1),	/* T302X */
+	CH_DEVICE(0x25, 1, 3),	/* T320E */
+	CH_DEVICE(0x26, 1, 2),	/* T310X */
+	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
+	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
+	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
+	{0,}
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
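+/* e.g. loading with "msi=1" (illustrative) allows MSI but excludes MSI-X */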
+
+/*
+ * The driver enables offload by default.
+ * To disable it at load time, set ofld_disable = 1.
+ */
+
+static int ofld_disable = 0;
+
+module_param(ofld_disable, int, 0644);
+MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time");
+
+/*
+ * We have work elements that we need to cancel when an interface is taken
+ * down.  Normally the work elements would be executed by keventd but that
+ * can deadlock because of linkwatch.  If our close method takes the rtnl
+ * lock and linkwatch is ahead of our work elements in keventd, linkwatch
+ * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
+ * for our work to complete.  Get our own work queue to solve this.
+ */
+static struct workqueue_struct *cxgb3_wq;
+
+/**
+ *	link_report - show link status and link speed/duplex
+ *	@dev: the net device whose link state is to be reported
+ *
+ *	Shows the link status, speed, and duplex of a port.
+ */
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev))
+		printk(KERN_INFO "%s: link down\n", dev->name);
+	else {
+		const char *s = "10Mbps";
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_config.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+	}
+}
+
+/**
+ *	t3_os_link_changed - handle link status changes
+ *	@adapter: the adapter associated with the link change
+ *	@port_id: the port index whose link status has changed
+ *	@link_stat: the new status of the link
+ *	@speed: the new speed setting
+ *	@duplex: the new duplex setting
+ *	@pause: the new flow-control setting
+ *
+ *	This is the OS-dependent handler for link status changes.  The
+ *	OS-neutral handler takes care of most of the processing for these events,
+ *	then calls this handler for any OS-specific processing.
+ */
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct net_device *dev = adapter->port[port_id];
+
+	/* Skip changes from disabled ports. */
+	if (!netif_running(dev))
+		return;
+
+	if (link_stat != netif_carrier_ok(dev)) {
+		if (link_stat)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
+		link_report(dev);
+	}
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+	struct t3_rx_mode rm;
+	struct port_info *pi = netdev_priv(dev);
+
+	init_rx_mode(&rm, dev, dev->mc_list);
+	t3_mac_set_rx_mode(&pi->mac, &rm);
+}
+
+/**
+ *	link_start - enable a port
+ *	@dev: the device to enable
+ *
+ *	Performs the MAC and PHY actions needed to enable a port.
+ */
+static void link_start(struct net_device *dev)
+{
+	struct t3_rx_mode rm;
+	struct port_info *pi = netdev_priv(dev);
+	struct cmac *mac = &pi->mac;
+
+	init_rx_mode(&rm, dev, dev->mc_list);
+	t3_mac_reset(mac);
+	t3_mac_set_mtu(mac, dev->mtu);
+	t3_mac_set_address(mac, 0, dev->dev_addr);
+	t3_mac_set_rx_mode(mac, &rm);
+	t3_link_start(&pi->phy, mac, &pi->link_config);
+	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static inline void cxgb_disable_msi(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+}
+
+/*
+ * Interrupt handler for asynchronous events used with MSI-X.
+ */
+static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
+{
+	t3_slow_intr_handler(cookie);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+
+	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+	adap->msix_info[0].desc[n] = 0;
+
+	for_each_port(adap, j) {
+		struct net_device *d = adap->port[j];
+		const struct port_info *pi = netdev_priv(d);
+
+		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+			snprintf(adap->msix_info[msi_idx].desc, n,
+				 "%s (queue %d)", d->name, i);
+			adap->msix_info[msi_idx].desc[n] = 0;
+		}
+	}
+}
+
+static int request_msix_data_irqs(struct adapter *adap)
+{
+	int i, j, err, qidx = 0;
+
+	for_each_port(adap, i) {
+		int nqsets = adap2pinfo(adap, i)->nqsets;
+
+		for (j = 0; j < nqsets; ++j) {
+			err = request_irq(adap->msix_info[qidx + 1].vec,
+					  t3_intr_handler(adap,
+							  adap->sge.qs[qidx].
+							  rspq.polling), 0,
+					  adap->msix_info[qidx + 1].desc,
+					  &adap->sge.qs[qidx]);
+			if (err) {
+				while (--qidx >= 0)
+					free_irq(adap->msix_info[qidx + 1].vec,
+						 &adap->sge.qs[qidx]);
+				return err;
+			}
+			qidx++;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	setup_rss - configure RSS
+ *	@adap: the adapter
+ *
+ *	Sets up RSS to distribute packets to multiple receive queues.  We
+ *	configure the RSS CPU lookup table to distribute to the number of HW
+ *	receive queues, and the response queue lookup table to narrow that
+ *	down to the response queues actually configured for each port.
+ *	We always configure the RSS mapping for two ports since the mapping
+ *	table has plenty of entries.
+ */
+static void setup_rss(struct adapter *adap)
+{
+	int i;
+	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
+	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
+	u8 cpus[SGE_QSETS + 1];
+	u16 rspq_map[RSS_TABLE_SIZE];
+
+	for (i = 0; i < SGE_QSETS; ++i)
+		cpus[i] = i;
+	cpus[SGE_QSETS] = 0xff;	/* terminator */
+
+	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
+		rspq_map[i] = i % nq0;
+		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+	}
+
+	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
+		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
+		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
+}
+
+/*
+ * If we have multiple receive queues per port serviced by NAPI we need one
+ * netdevice per queue as NAPI operates on netdevices.  We already have one
+ * netdevice, namely the one associated with the interface, so we use dummy
+ * ones for any additional queues.  Note that these netdevices exist purely
+ * so that NAPI has something to work with, they do not represent network
+ * ports and are not registered.
+ */
+static int init_dummy_netdevs(struct adapter *adap)
+{
+	int i, j, dummy_idx = 0;
+	struct net_device *nd;
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		const struct port_info *pi = netdev_priv(dev);
+
+		for (j = 0; j < pi->nqsets - 1; j++) {
+			if (!adap->dummy_netdev[dummy_idx]) {
+				nd = alloc_netdev(0, "", ether_setup);
+				if (!nd)
+					goto free_all;
+
+				nd->priv = adap;
+				nd->weight = 64;
+				set_bit(__LINK_STATE_START, &nd->state);
+				adap->dummy_netdev[dummy_idx] = nd;
+			}
+			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
+			dummy_idx++;
+		}
+	}
+	return 0;
+
+free_all:
+	while (--dummy_idx >= 0) {
+		free_netdev(adap->dummy_netdev[dummy_idx]);
+		adap->dummy_netdev[dummy_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.  This includes the handlers of
+ * both netdevices representing interfaces and the dummy ones for the extra
+ * queues.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+	int i;
+	struct net_device *dev;
+
+	for_each_port(adap, i) {
+		dev = adap->port[i];
+		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
+			msleep(1);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
+		dev = adap->dummy_netdev[i];
+		if (dev)
+			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
+				msleep(1);
+	}
+}
+
+/**
+ *	setup_sge_qsets - configure SGE Tx/Rx/response queues
+ *	@adap: the adapter
+ *
+ *	Determines how many sets of SGE queues to use and initializes them.
+ *	We support multiple queue sets per port if we have MSI-X, otherwise
+ *	just one queue set per port.
+ */
+static int setup_sge_qsets(struct adapter *adap)
+{
+	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
+	unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
+
+	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
+		irq_idx = -1;
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		const struct port_info *pi = netdev_priv(dev);
+
+		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+			err = t3_sge_alloc_qset(adap, qset_idx, 1,
+				(adap->flags & USING_MSIX) ? qset_idx + 1 :
+							     irq_idx,
+				&adap->params.sge.qset[qset_idx], ntxq,
+				j == 0 ? dev :
+					 adap->dummy_netdev[dummy_dev_idx++]);
+			if (err) {
+				t3_free_sge_resources(adap);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static ssize_t attr_show(struct class_device *cd, char *buf,
+			 ssize_t(*format) (struct adapter *, char *))
+{
+	ssize_t len;
+	struct adapter *adap = to_net_dev(cd)->priv;
+
+	/* Synchronize with ioctls that may shut down the device */
+	rtnl_lock();
+	len = (*format) (adap, buf);
+	rtnl_unlock();
+	return len;
+}
+
+static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
+			  ssize_t(*set) (struct adapter *, unsigned int),
+			  unsigned int min_val, unsigned int max_val)
+{
+	char *endp;
+	ssize_t ret;
+	unsigned int val;
+	struct adapter *adap = to_net_dev(cd)->priv;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (endp == buf || val < min_val || val > max_val)
+		return -EINVAL;
+
+	rtnl_lock();
+	ret = (*set) (adap, val);
+	if (!ret)
+		ret = len;
+	rtnl_unlock();
+	return ret;
+}
+
+#define CXGB3_SHOW(name, val_expr) \
+static ssize_t format_##name(struct adapter *adap, char *buf) \
+{ \
+	return sprintf(buf, "%u\n", val_expr); \
+} \
+static ssize_t show_##name(struct class_device *cd, char *buf) \
+{ \
+	return attr_show(cd, buf, format_##name); \
+}
+
+static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+{
+	if (adap->flags & FULL_INIT_DONE)
+		return -EBUSY;
+	if (val && adap->params.rev == 0)
+		return -EINVAL;
+	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
+		return -EINVAL;
+	adap->params.mc5.nfilters = val;
+	return 0;
+}
+
+static ssize_t store_nfilters(struct class_device *cd, const char *buf,
+			      size_t len)
+{
+	return attr_store(cd, buf, len, set_nfilters, 0, ~0);
+}
+
+static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+{
+	if (adap->flags & FULL_INIT_DONE)
+		return -EBUSY;
+	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
+		return -EINVAL;
+	adap->params.mc5.nservers = val;
+	return 0;
+}
+
+static ssize_t store_nservers(struct class_device *cd, const char *buf,
+			      size_t len)
+{
+	return attr_store(cd, buf, len, set_nservers, 0, ~0);
+}
+
+#define CXGB3_ATTR_R(name, val_expr) \
+CXGB3_SHOW(name, val_expr) \
+static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+#define CXGB3_ATTR_RW(name, val_expr, store_method) \
+CXGB3_SHOW(name, val_expr) \
+static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+
+CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
+CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
+CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
+
+static struct attribute *cxgb3_attrs[] = {
+	&class_device_attr_cam_size.attr,
+	&class_device_attr_nfilters.attr,
+	&class_device_attr_nservers.attr,
+	NULL
+};
+
+static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
+
+static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
+{
+	ssize_t len;
+	unsigned int v, addr, bpt, cpt;
+	struct adapter *adap = to_net_dev(cd)->priv;
+
+	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+	rtnl_lock();
+	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v >>= 16;
+	bpt = (v >> 8) & 0xff;
+	cpt = v & 0xff;
+	if (!cpt)
+		len = sprintf(buf, "disabled\n");
+	else {
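+		/*
+		 * bpt bytes are sent every cpt core-clock ticks, giving
+		 * bpt * (core Hz / cpt) bytes/sec; dividing by 125 turns
+		 * bytes/sec into kbit/sec.
+		 */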
+		v = (adap->params.vpd.cclk * 1000) / cpt;
+		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
+	}
+	rtnl_unlock();
+	return len;
+}
+
+static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
+			     size_t len, int sched)
+{
+	char *endp;
+	ssize_t ret;
+	unsigned int val;
+	struct adapter *adap = to_net_dev(cd)->priv;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (endp == buf || val > 10000000)
+		return -EINVAL;
+
+	rtnl_lock();
+	ret = t3_config_sched(adap, val, sched);
+	if (!ret)
+		ret = len;
+	rtnl_unlock();
+	return ret;
+}
+
+#define TM_ATTR(name, sched) \
+static ssize_t show_##name(struct class_device *cd, char *buf) \
+{ \
+	return tm_attr_show(cd, buf, sched); \
+} \
+static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
+{ \
+	return tm_attr_store(cd, buf, len, sched); \
+} \
+static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+
+TM_ATTR(sched0, 0);
+TM_ATTR(sched1, 1);
+TM_ATTR(sched2, 2);
+TM_ATTR(sched3, 3);
+TM_ATTR(sched4, 4);
+TM_ATTR(sched5, 5);
+TM_ATTR(sched6, 6);
+TM_ATTR(sched7, 7);
+
+static struct attribute *offload_attrs[] = {
+	&class_device_attr_sched0.attr,
+	&class_device_attr_sched1.attr,
+	&class_device_attr_sched2.attr,
+	&class_device_attr_sched3.attr,
+	&class_device_attr_sched4.attr,
+	&class_device_attr_sched5.attr,
+	&class_device_attr_sched6.attr,
+	&class_device_attr_sched7.attr,
+	NULL
+};
+
+static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
+
+/*
+ * Sends an sk_buff to an offload queue driver
+ * after dealing with any active network taps.
+ */
+static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = t3_offload_tx(tdev, skb);
+	local_bh_enable();
+	return ret;
+}
+
+static int write_smt_entry(struct adapter *adapter, int idx)
+{
+	struct cpl_smt_write_req *req;
+	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
+	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
+	req->iff = idx;
+	memset(req->src_mac1, 0, sizeof(req->src_mac1));
+	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
+	skb->priority = 1;
+	offload_tx(&adapter->tdev, skb);
+	return 0;
+}
+
+static int init_smt(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i)
+		write_smt_entry(adapter, i);
+	return 0;
+}
+
+static void init_port_mtus(struct adapter *adapter)
+{
+	unsigned int mtus = adapter->port[0]->mtu;
+
+	if (adapter->port[1])
+		mtus |= adapter->port[1]->mtu << 16;
+	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
+}
+
+static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+			      int hi, int port)
+{
+	struct sk_buff *skb;
+	struct mngt_pktsched_wr *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
+	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
+	req->sched = sched;
+	req->idx = qidx;
+	req->min = lo;
+	req->max = hi;
+	req->binding = port;
+	t3_mgmt_tx(adap, skb);
+}
+
+static void bind_qsets(struct adapter *adap)
+{
+	int i, j;
+
+	for_each_port(adap, i) {
+		const struct port_info *pi = adap2pinfo(adap, i);
+
+		for (j = 0; j < pi->nqsets; ++j)
+			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
+					  -1, i);
+	}
+}
+
+/**
+ *	cxgb_up - enable the adapter
+ *	@adap: the adapter being enabled
+ *
+ *	Called when the first port is enabled, this function performs the
+ *	actions necessary to make an adapter operational, such as completing
+ *	the initialization of HW modules, and enabling interrupts.
+ *
+ *	Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+	int err = 0;
+
+	if (!(adap->flags & FULL_INIT_DONE)) {
+		err = t3_check_fw_version(adap);
+		if (err)
+			goto out;
+
+		err = init_dummy_netdevs(adap);
+		if (err)
+			goto out;
+
+		err = t3_init_hw(adap, 0);
+		if (err)
+			goto out;
+
+		err = setup_sge_qsets(adap);
+		if (err)
+			goto out;
+
+		setup_rss(adap);
+		adap->flags |= FULL_INIT_DONE;
+	}
+
+	t3_intr_clear(adap);
+
+	if (adap->flags & USING_MSIX) {
+		name_msix_vecs(adap);
+		err = request_irq(adap->msix_info[0].vec,
+				  t3_async_intr_handler, 0,
+				  adap->msix_info[0].desc, adap);
+		if (err)
+			goto irq_err;
+
+		if (request_msix_data_irqs(adap)) {
+			free_irq(adap->msix_info[0].vec, adap);
+			goto irq_err;
+		}
+	} else if ((err = request_irq(adap->pdev->irq,
+				      t3_intr_handler(adap,
+						      adap->sge.qs[0].rspq.
+						      polling),
+				      (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
+				      adap->name, adap)))
+		goto irq_err;
+
+	t3_sge_start(adap);
+	t3_intr_enable(adap);
+
+	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
+		bind_qsets(adap);
+	adap->flags |= QUEUES_BOUND;
+
+out:
+	return err;
+irq_err:
+	CH_ERR(adap, "request_irq failed, err %d\n", err);
+	goto out;
+}
+
+/*
+ * Release resources when all the ports and offloading have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+	t3_sge_stop(adapter);
+	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
+	t3_intr_disable(adapter);
+	spin_unlock_irq(&adapter->work_lock);
+
+	if (adapter->flags & USING_MSIX) {
+		int i, n = 0;
+
+		free_irq(adapter->msix_info[0].vec, adapter);
+		for_each_port(adapter, i)
+			n += adap2pinfo(adapter, i)->nqsets;
+
+		for (i = 0; i < n; ++i)
+			free_irq(adapter->msix_info[i + 1].vec,
+				 &adapter->sge.qs[i]);
+	} else
+		free_irq(adapter->pdev->irq, adapter);
+
+	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
+	quiesce_rx(adapter);
+}
+
+static void schedule_chk_task(struct adapter *adap)
+{
+	unsigned int timeo;
+
+	timeo = adap->params.linkpoll_period ?
+	    (HZ * adap->params.linkpoll_period) / 10 :
+	    adap->params.stats_update_period * HZ;
+	if (timeo)
+		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
+}
+
+static int offload_open(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct t3cdev *tdev = T3CDEV(dev);
+	int adap_up = adapter->open_device_map & PORT_MASK;
+	int err = 0;
+
+	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+		return 0;
+
+	if (!adap_up && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	t3_tp_set_offload_mode(adapter, 1);
+	tdev->lldev = adapter->port[0];
+	err = cxgb3_offload_activate(adapter);
+	if (err)
+		goto out;
+
+	init_port_mtus(adapter);
+	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
+		     adapter->params.b_wnd,
+		     adapter->params.rev == 0 ?
+		     adapter->port[0]->mtu : 0xffff);
+	init_smt(adapter);
+
+	/* Never mind if the next step fails */
+	sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+
+	/* Call back all registered clients */
+	cxgb3_add_clients(tdev);
+
+out:
+	/* restore them in case the offload module has changed them */
+	if (err) {
+		t3_tp_set_offload_mode(adapter, 0);
+		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+		cxgb3_set_dummy_ops(tdev);
+	}
+	return err;
+}
+
+static int offload_close(struct t3cdev *tdev)
+{
+	struct adapter *adapter = tdev2adap(tdev);
+
+	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+		return 0;
+
+	/* Call back all registered clients */
+	cxgb3_remove_clients(tdev);
+
+	sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+
+	tdev->lldev = NULL;
+	cxgb3_set_dummy_ops(tdev);
+	t3_tp_set_offload_mode(adapter, 0);
+	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+
+	cxgb3_offload_deactivate(adapter);
+	return 0;
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+	int other_ports = adapter->open_device_map & PORT_MASK;
+
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	set_bit(pi->port_id, &adapter->open_device_map);
+	if (!ofld_disable) {
+		err = offload_open(dev);
+		if (err)
+			printk(KERN_WARNING
+			       "Could not initialize offload capabilities\n");
+	}
+
+	link_start(dev);
+	t3_port_intr_enable(adapter, pi->port_id);
+	netif_start_queue(dev);
+	if (!other_ports)
+		schedule_chk_task(adapter);
+
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = netdev_priv(dev);
+
+	t3_port_intr_disable(adapter, p->port_id);
+	netif_stop_queue(dev);
+	p->phy.ops->power_down(&p->phy, 1);
+	netif_carrier_off(dev);
+	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+
+	spin_lock(&adapter->work_lock);	/* sync with update task */
+	clear_bit(p->port_id, &adapter->open_device_map);
+	spin_unlock(&adapter->work_lock);
+
+	if (!(adapter->open_device_map & PORT_MASK))
+		cancel_rearming_delayed_workqueue(cxgb3_wq,
+						  &adapter->adap_check_task);
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+
+	return 0;
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = netdev_priv(dev);
+	struct net_device_stats *ns = &p->netstats;
+	const struct mac_stats *pstats;
+
+	spin_lock(&adapter->stats_lock);
+	pstats = t3_mac_update_stats(&p->mac);
+	spin_unlock(&adapter->stats_lock);
+
+	ns->tx_bytes = pstats->tx_octets;
+	ns->tx_packets = pstats->tx_frames;
+	ns->rx_bytes = pstats->rx_octets;
+	ns->rx_packets = pstats->rx_frames;
+	ns->multicast = pstats->rx_mcast_frames;
+
+	ns->tx_errors = pstats->tx_underrun;
+	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
+	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
+	    pstats->rx_fifo_ovfl;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
+	ns->rx_over_errors = 0;
+	ns->rx_crc_errors = pstats->rx_fcs_errs;
+	ns->rx_frame_errors = pstats->rx_symbol_errs;
+	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
+	ns->rx_missed_errors = pstats->rx_cong_drops;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors = 0;
+	ns->tx_carrier_errors = 0;
+	ns->tx_fifo_errors = pstats->tx_underrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors = 0;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct adapter *adapter = dev->priv;
+
+	adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK         ",
+	"TxFramesOK         ",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames      ",
+	"TxUnderrun         ",
+	"TxExtUnderrun      ",
+
+	"TxFrames64         ",
+	"TxFrames65To127    ",
+	"TxFrames128To255   ",
+	"TxFrames256To511   ",
+	"TxFrames512To1023  ",
+	"TxFrames1024To1518 ",
+	"TxFrames1519ToMax  ",
+
+	"RxOctetsOK         ",
+	"RxFramesOK         ",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames      ",
+	"RxFCSErrors        ",
+	"RxSymbolErrors     ",
+	"RxShortErrors      ",
+	"RxJabberErrors     ",
+	"RxLengthErrors     ",
+	"RxFIFOoverflow     ",
+
+	"RxFrames64         ",
+	"RxFrames65To127    ",
+	"RxFrames128To255   ",
+	"RxFrames256To511   ",
+	"RxFrames512To1023  ",
+	"RxFrames1024To1518 ",
+	"RxFrames1519ToMax  ",
+
+	"PhyFIFOErrors      ",
+	"TSO                ",
+	"VLANextractions    ",
+	"VLANinsertions     ",
+	"TxCsumOffload      ",
+	"RxCsumGood         ",
+	"RxDrops            "
+};
+
+static int get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(stats_strings);
+}
+
+#define T3_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T3_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	u32 fw_vers = 0;
+	struct adapter *adapter = dev->priv;
+
+	t3_get_fw_version(adapter, &fw_vers);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(adapter->pdev));
+	if (!fw_vers)
+		strcpy(info->fw_version, "N/A");
+	else {
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			 "%s %u.%u.%u",
+			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
+			 G_FW_VERSION_MAJOR(fw_vers),
+			 G_FW_VERSION_MINOR(fw_vers),
+			 G_FW_VERSION_MICRO(fw_vers));
+	}
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static unsigned long collect_sge_port_stats(struct adapter *adapter,
+					    struct port_info *p, int idx)
+{
+	int i;
+	unsigned long tot = 0;
+
+	for (i = 0; i < p->nqsets; ++i)
+		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
+	return tot;
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+	const struct mac_stats *s;
+
+	spin_lock(&adapter->stats_lock);
+	s = t3_mac_update_stats(&pi->mac);
+	spin_unlock(&adapter->stats_lock);
+
+	*data++ = s->tx_octets;
+	*data++ = s->tx_frames;
+	*data++ = s->tx_mcast_frames;
+	*data++ = s->tx_bcast_frames;
+	*data++ = s->tx_pause;
+	*data++ = s->tx_underrun;
+	*data++ = s->tx_fifo_urun;
+
+	*data++ = s->tx_frames_64;
+	*data++ = s->tx_frames_65_127;
+	*data++ = s->tx_frames_128_255;
+	*data++ = s->tx_frames_256_511;
+	*data++ = s->tx_frames_512_1023;
+	*data++ = s->tx_frames_1024_1518;
+	*data++ = s->tx_frames_1519_max;
+
+	*data++ = s->rx_octets;
+	*data++ = s->rx_frames;
+	*data++ = s->rx_mcast_frames;
+	*data++ = s->rx_bcast_frames;
+	*data++ = s->rx_pause;
+	*data++ = s->rx_fcs_errs;
+	*data++ = s->rx_symbol_errs;
+	*data++ = s->rx_short;
+	*data++ = s->rx_jabber;
+	*data++ = s->rx_too_long;
+	*data++ = s->rx_fifo_ovfl;
+
+	*data++ = s->rx_frames_64;
+	*data++ = s->rx_frames_65_127;
+	*data++ = s->rx_frames_128_255;
+	*data++ = s->rx_frames_256_511;
+	*data++ = s->rx_frames_512_1023;
+	*data++ = s->rx_frames_1024_1518;
+	*data++ = s->rx_frames_1519_max;
+
+	*data++ = pi->phy.fifo_errors;
+
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+	*data++ = s->rx_cong_drops;
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for (; start <= end; start += sizeof(u32))
+		*p++ = t3_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->priv;
+
+	/*
+	 * Version scheme:
+	 * bits 0..9: chip version
+	 * bits 10..15: chip revision
+	 * bit 31: set for PCIe cards
+	 */
+	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
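+	/* e.g. a rev-2 PCIe adapter reports 3 | (2 << 10) | (1 << 31) = 0x80000803 */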
+
+	/*
+	 * We skip the MAC statistics registers because they are clear-on-read.
+	 * Also reading multi-register stats would need to synchronize with the
+	 * periodic mac stats accumulation.  Hard to justify the complexity.
+	 */
+	memset(buf, 0, T3_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
+	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
+	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
+	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
+	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
+	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
+		       XGM_REG(A_XGM_SERDES_STAT3, 1));
+	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
+		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+	if (p->link_config.autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+	p->phy.ops->autoneg_restart(&p->phy);
+	return 0;
+}
+
+static int cxgb3_phys_id(struct net_device *dev, u32 data)
+{
+	int i;
+	struct adapter *adapter = dev->priv;
+
+	if (data == 0)
+		data = 2;
+
+	for (i = 0; i < data * 2; i++) {
+		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
+		if (msleep_interruptible(500))
+			break;
+	}
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+			 F_GPIO0_OUT_VAL);
+	return 0;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = p->link_config.speed;
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy.addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg))
+		return -EOPNOTSUPP;	/* can't change speed/duplex */
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+
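+		/* 1 Gb/s is only usable with autoneg, so it may not be forced */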
+		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+			return -EINVAL;
+		lc->requested_speed = cmd->speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t3_link_start(&p->phy, &p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t3_link_start(&p->phy, &p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
+	}
+	return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	return p->rx_csum_offload;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	p->rx_csum_offload = data;
+	return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = 0;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	e->rx_pending = adapter->params.sge.qset[0].fl_size;
+	e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
+	e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
+	e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	int i;
+	struct adapter *adapter = dev->priv;
+
+	if (e->rx_pending > MAX_RX_BUFFERS ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_TXQ_ENTRIES ||
+	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct qset_params *q = &adapter->params.sge.qset[i];
+
+		q->rspq_size = e->rx_mini_pending;
+		q->fl_size = e->rx_pending;
+		q->jumbo_size = e->rx_jumbo_pending;
+		q->txq_size[0] = e->tx_pending;
+		q->txq_size[1] = e->tx_pending;
+		q->txq_size[2] = e->tx_pending;
+	}
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+	struct qset_params *qsp = &adapter->params.sge.qset[0];
+	struct sge_qset *qs = &adapter->sge.qs[0];
+
+	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
+		return -EINVAL;
+
+	qsp->coalesce_usecs = c->rx_coalesce_usecs;
+	t3_update_qset_coalesce(qs, qsp);
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+	struct qset_params *q = adapter->params.sge.qset;
+
+	c->rx_coalesce_usecs = q->coalesce_usecs;
+	return 0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 * data)
+{
+	int i, err = 0;
+	struct adapter *adapter = dev->priv;
+
+	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	e->magic = EEPROM_MAGIC;
+	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
+
+	if (!err)
+		memcpy(data, buf + e->offset, e->len);
+	kfree(buf);
+	return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 * data)
+{
+	u8 *buf;
+	int err = 0;
+	u32 aligned_offset, aligned_len, *p;
+	struct adapter *adapter = dev->priv;
+
+	if (eeprom->magic != EEPROM_MAGIC)
+		return -EINVAL;
+
+	aligned_offset = eeprom->offset & ~3;
+	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+		buf = kmalloc(aligned_len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
+		if (!err && aligned_len > 4)
+			err = t3_seeprom_read(adapter,
+					      aligned_offset + aligned_len - 4,
+					      (u32 *)&buf[aligned_len - 4]);
+		if (err)
+			goto out;
+		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+	} else
+		buf = data;
+
+	err = t3_seeprom_wp(adapter, 0);
+	if (err)
+		goto out;
+
+	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+		err = t3_seeprom_write(adapter, aligned_offset, *p);
+		aligned_offset += 4;
+	}
+
+	if (!err)
+		err = t3_seeprom_wp(adapter, 1);
+out:
+	if (buf != data)
+		kfree(buf);
+	return err;
+}
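
The alignment arithmetic in set_eeprom() is worth spelling out: the byte-granular user request is rounded out to whole 32-bit words, the partial words at each end are read back first, and only then is the merged buffer written. A stand-alone sketch of the same arithmetic, with purely illustrative values:

#include <stdio.h>

/* Illustrative sketch (not driver code) of set_eeprom()'s alignment math:
 * round the start down and the length up so the span covers whole words. */
int main(void)
{
	unsigned int offset = 5, len = 6;	/* hypothetical byte request */
	unsigned int aligned_offset = offset & ~3u;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3u;

	/* Prints 4 and 8: words [4,12) cover the requested bytes [5,11),
	 * so the first and last words must be read back to preserve the
	 * neighbouring bytes the caller did not ask to change. */
	printf("aligned_offset=%u aligned_len=%u\n",
	       aligned_offset, aligned_len);
	return 0;
}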
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+	.get_settings = get_settings,
+	.set_settings = set_settings,
+	.get_drvinfo = get_drvinfo,
+	.get_msglevel = get_msglevel,
+	.set_msglevel = set_msglevel,
+	.get_ringparam = get_sge_param,
+	.set_ringparam = set_sge_param,
+	.get_coalesce = get_coalesce,
+	.set_coalesce = set_coalesce,
+	.get_eeprom_len = get_eeprom_len,
+	.get_eeprom = get_eeprom,
+	.set_eeprom = set_eeprom,
+	.get_pauseparam = get_pauseparam,
+	.set_pauseparam = set_pauseparam,
+	.get_rx_csum = get_rx_csum,
+	.set_rx_csum = set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_link = ethtool_op_get_link,
+	.get_strings = get_strings,
+	.phys_id = cxgb3_phys_id,
+	.nway_reset = restart_autoneg,
+	.get_stats_count = get_stats_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len = get_regs_len,
+	.get_regs = get_regs,
+	.get_wol = get_wol,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ethtool_op_set_tso,
+	.get_perm_addr = ethtool_op_get_perm_addr
+};
+
+/*
+ * Check that a value is in [lo, hi].  Negative values always pass, as
+ * they mean "leave this parameter unchanged" in the ioctls below.
+ */
+static int in_range(int val, int lo, int hi)
+{
+	return val < 0 || (val <= hi && val >= lo);
+}
+
+static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+{
+	int ret;
+	u32 cmd;
+	struct adapter *adapter = dev->priv;
+
+	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case CHELSIO_SETREG:{
+		struct ch_reg edata;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+		if ((edata.addr & 3) != 0 ||
+		    edata.addr >= adapter->mmio_len)
+			return -EINVAL;
+		writel(edata.val, adapter->regs + edata.addr);
+		break;
+	}
+	case CHELSIO_GETREG:{
+		struct ch_reg edata;
+
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+		if ((edata.addr & 3) != 0 ||
+		    edata.addr >= adapter->mmio_len)
+			return -EINVAL;
+		edata.val = readl(adapter->regs + edata.addr);
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_SET_QSET_PARAMS:{
+		int i;
+		struct qset_params *q;
+		struct ch_qset_params t;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		if (t.qset_idx >= SGE_QSETS)
+			return -EINVAL;
+		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
+		    !in_range(t.cong_thres, 0, 255) ||
+		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+			      MAX_CTRL_TXQ_ENTRIES) ||
+		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
+			      MAX_RX_BUFFERS) ||
+		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+			      MAX_RX_JUMBO_BUFFERS) ||
+		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+			      MAX_RSPQ_ENTRIES))
+			return -EINVAL;
+		if ((adapter->flags & FULL_INIT_DONE) &&
+		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
+		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
+		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
+		     t.polling >= 0 || t.cong_thres >= 0))
+			return -EBUSY;
+
+		q = &adapter->params.sge.qset[t.qset_idx];
+
+		if (t.rspq_size >= 0)
+			q->rspq_size = t.rspq_size;
+		if (t.fl_size[0] >= 0)
+			q->fl_size = t.fl_size[0];
+		if (t.fl_size[1] >= 0)
+			q->jumbo_size = t.fl_size[1];
+		if (t.txq_size[0] >= 0)
+			q->txq_size[0] = t.txq_size[0];
+		if (t.txq_size[1] >= 0)
+			q->txq_size[1] = t.txq_size[1];
+		if (t.txq_size[2] >= 0)
+			q->txq_size[2] = t.txq_size[2];
+		if (t.cong_thres >= 0)
+			q->cong_thres = t.cong_thres;
+		if (t.intr_lat >= 0) {
+			struct sge_qset *qs =
+				&adapter->sge.qs[t.qset_idx];
+
+			q->coalesce_usecs = t.intr_lat;
+			t3_update_qset_coalesce(qs, q);
+		}
+		if (t.polling >= 0) {
+			if (adapter->flags & USING_MSIX)
+				q->polling = t.polling;
+			else {
+				/* No polling with INTx for T3A */
+				if (adapter->params.rev == 0 &&
+				    !(adapter->flags & USING_MSI))
+					t.polling = 0;
+
+				for (i = 0; i < SGE_QSETS; i++) {
+					q = &adapter->params.sge.qset[i];
+					q->polling = t.polling;
+				}
+			}
+		}
+		break;
+	}
+	case CHELSIO_GET_QSET_PARAMS:{
+		struct qset_params *q;
+		struct ch_qset_params t;
+
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		if (t.qset_idx >= SGE_QSETS)
+			return -EINVAL;
+
+		q = &adapter->params.sge.qset[t.qset_idx];
+		t.rspq_size = q->rspq_size;
+		t.txq_size[0] = q->txq_size[0];
+		t.txq_size[1] = q->txq_size[1];
+		t.txq_size[2] = q->txq_size[2];
+		t.fl_size[0] = q->fl_size;
+		t.fl_size[1] = q->jumbo_size;
+		t.polling = q->polling;
+		t.intr_lat = q->coalesce_usecs;
+		t.cong_thres = q->cong_thres;
+
+		if (copy_to_user(useraddr, &t, sizeof(t)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_SET_QSET_NUM:{
+		struct ch_reg edata;
+		struct port_info *pi = netdev_priv(dev);
+		unsigned int i, first_qset = 0, other_qsets = 0;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (adapter->flags & FULL_INIT_DONE)
+			return -EBUSY;
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+		if (edata.val < 1 ||
+		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
+			return -EINVAL;
+
+		for_each_port(adapter, i)
+			if (adapter->port[i] && adapter->port[i] != dev)
+				other_qsets += adap2pinfo(adapter, i)->nqsets;
+
+		if (edata.val + other_qsets > SGE_QSETS)
+			return -EINVAL;
+
+		pi->nqsets = edata.val;
+
+		for_each_port(adapter, i)
+			if (adapter->port[i]) {
+				pi = adap2pinfo(adapter, i);
+				pi->first_qset = first_qset;
+				first_qset += pi->nqsets;
+			}
+		break;
+	}
+	case CHELSIO_GET_QSET_NUM:{
+		struct ch_reg edata;
+		struct port_info *pi = netdev_priv(dev);
+
+		edata.cmd = CHELSIO_GET_QSET_NUM;
+		edata.val = pi->nqsets;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_LOAD_FW:{
+		u8 *fw_data;
+		struct ch_mem_range t;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+
+		fw_data = kmalloc(t.len, GFP_KERNEL);
+		if (!fw_data)
+			return -ENOMEM;
+
+		if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
+			kfree(fw_data);
+			return -EFAULT;
+		}
+
+		ret = t3_load_fw(adapter, fw_data, t.len);
+		kfree(fw_data);
+		if (ret)
+			return ret;
+		break;
+	}
+	case CHELSIO_SETMTUTAB:{
+		struct ch_mtus m;
+		int i;
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (offload_running(adapter))
+			return -EBUSY;
+		if (copy_from_user(&m, useraddr, sizeof(m)))
+			return -EFAULT;
+		if (m.nmtus != NMTUS)
+			return -EINVAL;
+		if (m.mtus[0] < 81)	/* accommodate SACK */
+			return -EINVAL;
+
+		/* MTUs must be in ascending order */
+		for (i = 1; i < NMTUS; ++i)
+			if (m.mtus[i] < m.mtus[i - 1])
+				return -EINVAL;
+
+		memcpy(adapter->params.mtus, m.mtus,
+			sizeof(adapter->params.mtus));
+		break;
+	}
+	case CHELSIO_GET_PM:{
+		struct tp_params *p = &adapter->params.tp;
+		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		m.tx_pg_sz = p->tx_pg_size;
+		m.tx_num_pg = p->tx_num_pgs;
+		m.rx_pg_sz = p->rx_pg_size;
+		m.rx_num_pg = p->rx_num_pgs;
+		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
+		if (copy_to_user(useraddr, &m, sizeof(m)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_SET_PM:{
+		struct ch_pm m;
+		struct tp_params *p = &adapter->params.tp;
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (adapter->flags & FULL_INIT_DONE)
+			return -EBUSY;
+		if (copy_from_user(&m, useraddr, sizeof(m)))
+			return -EFAULT;
+		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
+		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
+			return -EINVAL;	/* not power of 2 */
+		if (!(m.rx_pg_sz & 0x14000))
+			return -EINVAL;	/* not 16KB or 64KB */
+		if (!(m.tx_pg_sz & 0x1554000))
+			return -EINVAL;	/* not 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */
+		if (m.tx_num_pg == -1)
+			m.tx_num_pg = p->tx_num_pgs;
+		if (m.rx_num_pg == -1)
+			m.rx_num_pg = p->rx_num_pgs;
+		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
+			return -EINVAL;
+		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
+		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
+			return -EINVAL;
+		p->rx_pg_size = m.rx_pg_sz;
+		p->tx_pg_size = m.tx_pg_sz;
+		p->rx_num_pgs = m.rx_num_pg;
+		p->tx_num_pgs = m.tx_num_pg;
+		break;
+	}
+	case CHELSIO_GET_MEM:{
+		struct ch_mem_range t;
+		struct mc7 *mem;
+		u64 buf[32];
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!(adapter->flags & FULL_INIT_DONE))
+			return -EIO;	/* need the memory controllers */
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		if ((t.addr & 7) || (t.len & 7))
+			return -EINVAL;
+		if (t.mem_id == MEM_CM)
+			mem = &adapter->cm;
+		else if (t.mem_id == MEM_PMRX)
+			mem = &adapter->pmrx;
+		else if (t.mem_id == MEM_PMTX)
+			mem = &adapter->pmtx;
+		else
+			return -EINVAL;
+
+		/*
+		 * Version scheme:
+		 * bits 0..9: chip version
+		 * bits 10..15: chip revision
+		 */
+		t.version = 3 | (adapter->params.rev << 10);
+		if (copy_to_user(useraddr, &t, sizeof(t)))
+			return -EFAULT;
+
+		/*
+		 * Read 256 bytes at a time as len can be large and we don't
+		 * want to use huge intermediate buffers.
+		 */
+		useraddr += sizeof(t);	/* advance to start of buffer */
+		while (t.len) {
+			unsigned int chunk =
+				min_t(unsigned int, t.len, sizeof(buf));
+
+			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
+			if (ret)
+				return ret;
+			if (copy_to_user(useraddr, buf, chunk))
+				return -EFAULT;
+			useraddr += chunk;
+			t.addr += chunk;
+			t.len -= chunk;
+		}
+		break;
+	}
+	case CHELSIO_SET_TRACE_FILTER:{
+		struct ch_trace t;
+		const struct trace_params *tp;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+
+		tp = (const struct trace_params *)&t.sip;
+		if (t.config_tx)
+			t3_config_trace_filter(adapter, tp, 0,
+						t.invert_match,
+						t.trace_tx);
+		if (t.config_rx)
+			t3_config_trace_filter(adapter, tp, 1,
+						t.invert_match,
+						t.trace_rx);
+		break;
+	}
+	case CHELSIO_SET_PKTSCHED:{
+		struct ch_pktsched_params p;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!adapter->open_device_map)
+			return -EAGAIN;	/* uP and SGE must be running */
+		if (copy_from_user(&p, useraddr, sizeof(p)))
+			return -EFAULT;
+		send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
+				  p.binding);
+		break;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
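
One pattern in the CHELSIO_GET_MEM case deserves a note: instead of allocating a buffer sized to the user's request, the loop streams the data through a fixed 256-byte bounce buffer, so kernel memory use stays constant however large t.len is. A hedged sketch of the same loop shape, with read_src() standing in for t3_mc7_bd_read():

/* Hypothetical helper showing the bounded bounce-buffer copy loop used by
 * CHELSIO_GET_MEM above.  Not part of the driver. */
static int copy_chunked(void __user *dst, unsigned long pos,
			unsigned long len,
			int (*read_src)(unsigned long pos, unsigned int n,
					u64 *buf))
{
	u64 buf[32];			/* 256-byte bounce buffer */

	while (len) {
		unsigned int chunk = min_t(unsigned long, len, sizeof(buf));
		int ret = read_src(pos, chunk, buf);

		if (ret)
			return ret;
		if (copy_to_user(dst, buf, chunk))
			return -EFAULT;
		dst += chunk;
		pos += chunk;
		len -= chunk;
	}
	return 0;
}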
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	int ret, mmd;
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(req);
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = pi->phy.addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG:{
+		u32 val;
+		struct cphy *phy = &pi->phy;
+
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
+		if (is_10G(adapter)) {
+			mmd = data->phy_id >> 8;
+			if (!mmd)
+				mmd = MDIO_DEV_PCS;
+			else if (mmd > MDIO_DEV_XGXS)
+				return -EINVAL;
+
+			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
+					     mmd, data->reg_num, &val);
+		} else
+			ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
+					     data->reg_num & 0x1f, &val);
+		if (!ret)
+			data->val_out = val;
+		break;
+	}
+	case SIOCSMIIREG:{
+		struct cphy *phy = &pi->phy;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
+		if (is_10G(adapter)) {
+			mmd = data->phy_id >> 8;
+			if (!mmd)
+				mmd = MDIO_DEV_PCS;
+			else if (mmd > MDIO_DEV_XGXS)
+				return -EINVAL;
+
+			ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
+					      mmd, data->reg_num,
+					      data->val_in);
+		} else
+			ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
+					      data->reg_num & 0x1f,
+					      data->val_in);
+		break;
+	}
+	case SIOCCHIOCTL:
+		return cxgb_extension_ioctl(dev, req->ifr_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (new_mtu < 81)	/* accommodate SACK */
+		return -EINVAL;
+	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	init_port_mtus(adapter);
+	if (adapter->params.rev == 0 && offload_running(adapter))
+		t3_load_mtus(adapter, adapter->params.mtus,
+			     adapter->params.a_wnd, adapter->params.b_wnd,
+			     adapter->port[0]->mtu);
+	return 0;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
+	if (offload_running(adapter))
+		write_smt_entry(adapter, pi->port_id);
+	return 0;
+}
+
+/**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning.  We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+	int i;
+
+	for (i = 0; i < p->nqsets; i++) {
+		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
+
+		spin_lock_irq(&q->lock);
+		spin_unlock_irq(&q->lock);
+	}
+}
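
The lock dance above is a classic quiescing idiom: acquiring a lock necessarily waits out its current holder, so an acquire immediately followed by a release proves that any Rx handler that was inside the critical section when we started has left it. A user-space sketch of the idiom (pthread names are illustrative, not driver code):

#include <pthread.h>

struct fake_queue {
	pthread_mutex_t lock;
	/* ...state the Rx path touches under lock... */
};

/* Returns only after every handler that held a queue lock at entry has
 * dropped it; nothing is done inside the critical sections themselves. */
static void synchronize_queues(struct fake_queue *qs, int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		pthread_mutex_lock(&qs[i].lock);
		pthread_mutex_unlock(&qs[i].lock);
	}
}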
+
+static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *pi = netdev_priv(dev);
+
+	pi->vlan_grp = grp;
+	if (adapter->params.rev > 0)
+		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
+	else {
+		/* single control for all ports */
+		unsigned int i, have_vlans = 0;
+		for_each_port(adapter, i)
+		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
+
+		t3_set_vlan_accel(adapter, 1, have_vlans);
+	}
+	t3_synchronize_rx(adapter, pi);
+}
+
+static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	/* nothing */
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct sge_qset *qs = dev2qset(dev);
+
+	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
+						    adapter);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.
+ */
+static void mac_stats_update(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+
+		if (netif_running(dev)) {
+			spin_lock(&adapter->stats_lock);
+			t3_mac_update_stats(&p->mac);
+			spin_unlock(&adapter->stats_lock);
+		}
+	}
+}
+
+static void check_link_status(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+
+		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
+			t3_link_changed(adapter, i);
+	}
+}
+
+static void t3_adap_check_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       adap_check_task.work);
+	const struct adapter_params *p = &adapter->params;
+
+	adapter->check_task_cnt++;
+
+	/* Check link status for PHYs without interrupts */
+	if (p->linkpoll_period)
+		check_link_status(adapter);
+
+	/* Accumulate MAC stats if needed */
+	if (!p->linkpoll_period ||
+	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
+	    p->stats_update_period) {
+		mac_stats_update(adapter);
+		adapter->check_task_cnt = 0;
+	}
+
+	/* Schedule the next check update if any port is active. */
+	spin_lock(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_chk_task(adapter);
+	spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes external (PHY) interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       ext_intr_handler_task);
+
+	t3_phy_intr_handler(adapter);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->work_lock);
+	if (adapter->slow_intr_mask) {
+		adapter->slow_intr_mask |= F_T3DBG;
+		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
+		t3_write_reg(adapter, A_PL_INT_ENABLE0,
+			     adapter->slow_intr_mask);
+	}
+	spin_unlock_irq(&adapter->work_lock);
+}
+
+/*
+ * Interrupt-context handler for external (PHY) interrupts.
+ */
+void t3_os_ext_intr_handler(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as they may be slow
+	 * and we use a mutex to protect MDIO registers.  We disable PHY
+	 * interrupts in the meantime and let the task reenable them when
+	 * it's done.
+	 */
+	spin_lock(&adapter->work_lock);
+	if (adapter->slow_intr_mask) {
+		adapter->slow_intr_mask &= ~F_T3DBG;
+		t3_write_reg(adapter, A_PL_INT_ENABLE0,
+			     adapter->slow_intr_mask);
+		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
+	}
+	spin_unlock(&adapter->work_lock);
+}
+
+void t3_fatal_err(struct adapter *adapter)
+{
+	unsigned int fw_status[4];
+
+	if (adapter->flags & FULL_INIT_DONE) {
+		t3_sge_stop(adapter);
+		t3_intr_disable(adapter);
+	}
+	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
+	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
+		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
+			 fw_status[0], fw_status[1],
+			 fw_status[2], fw_status[3]);
+}
+
+static int __devinit cxgb_enable_msix(struct adapter *adap)
+{
+	struct msix_entry entries[SGE_QSETS + 1];
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(entries); ++i)
+		entries[i].entry = i;
+
+	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
+	if (!err) {
+		for (i = 0; i < ARRAY_SIZE(entries); ++i)
+			adap->msix_info[i].vec = entries[i].vector;
+	} else if (err > 0)
+		dev_info(&adap->pdev->dev,
+		       "only %d MSI-X vectors left, not using MSI-X\n", err);
+	return err;
+}
+
+static void __devinit print_port_info(struct adapter *adap,
+				      const struct adapter_info *ai)
+{
+	static const char *pci_variant[] = {
+		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
+	};
+
+	int i;
+	char buf[80];
+
+	if (is_pcie(adap))
+		snprintf(buf, sizeof(buf), "%s x%d",
+			 pci_variant[adap->params.pci.variant],
+			 adap->params.pci.width);
+	else
+		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
+			 pci_variant[adap->params.pci.variant],
+			 adap->params.pci.speed, adap->params.pci.width);
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		const struct port_info *pi = netdev_priv(dev);
+
+		if (!test_bit(i, &adap->registered_device_map))
+			continue;
+		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
+		       dev->name, ai->desc, pi->port_type->desc,
+		       adap->params.rev, buf,
+		       (adap->flags & USING_MSIX) ? " MSI-X" :
+		       (adap->flags & USING_MSI) ? " MSI" : "");
+		if (adap->name == dev->name && adap->params.vpd.mclk)
+			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
+			       adap->name, t3_mc7_size(&adap->cm) >> 20,
+			       t3_mc7_size(&adap->pmtx) >> 20,
+			       t3_mc7_size(&adap->pmrx) >> 20);
+	}
+}
+
+static int __devinit init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	static int version_printed;
+
+	int i, err, pci_using_dac = 0;
+	unsigned long mmio_start, mmio_len;
+	const struct adapter_info *ai;
+	struct adapter *adapter = NULL;
+	struct port_info *pi;
+
+	if (!version_printed) {
+		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+		++version_printed;
+	}
+
+	if (!cxgb3_wq) {
+		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
+		if (!cxgb3_wq) {
+			printk(KERN_ERR DRV_NAME
+			       ": cannot initialize work queue\n");
+			return -ENOMEM;
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		/* Just info, some other driver may have claimed the device. */
+		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+		return err;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto out_release_regions;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		pci_using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+			       "coherent allocations\n");
+			goto out_disable_device;
+		}
+	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
+		dev_err(&pdev->dev, "no usable DMA configuration\n");
+		goto out_disable_device;
+	}
+
+	pci_set_master(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	ai = t3_get_adapter_info(ent->driver_data);
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+	if (!adapter->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
+	adapter->pdev = pdev;
+	adapter->name = pci_name(pdev);
+	adapter->msg_enable = dflt_msg_enable;
+	adapter->mmio_len = mmio_len;
+
+	mutex_init(&adapter->mdio_lock);
+	spin_lock_init(&adapter->work_lock);
+	spin_lock_init(&adapter->stats_lock);
+
+	INIT_LIST_HEAD(&adapter->adapter_list);
+	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
+
+	for (i = 0; i < ai->nports; ++i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev(sizeof(struct port_info));
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_MODULE_OWNER(netdev);
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adapter->port[i] = netdev;
+		pi = netdev_priv(netdev);
+		pi->rx_csum_offload = 1;
+		pi->nqsets = 1;
+		pi->first_qset = i;
+		pi->activity = 0;
+		pi->port_id = i;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+		netdev->mem_start = mmio_start;
+		netdev->mem_end = mmio_start + mmio_len - 1;
+		netdev->priv = adapter;
+		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+		netdev->features |= NETIF_F_LLTX;
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+
+		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		netdev->vlan_rx_register = vlan_rx_register;
+		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
+
+		netdev->open = cxgb_open;
+		netdev->stop = cxgb_close;
+		netdev->hard_start_xmit = t3_eth_xmit;
+		netdev->get_stats = cxgb_get_stats;
+		netdev->set_multicast_list = cxgb_set_rxmode;
+		netdev->do_ioctl = cxgb_ioctl;
+		netdev->change_mtu = cxgb_change_mtu;
+		netdev->set_mac_address = cxgb_set_mac_addr;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		netdev->poll_controller = cxgb_netpoll;
+#endif
+		netdev->weight = 64;
+
+		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+	}
+
+	pci_set_drvdata(pdev, adapter->port[0]);
+	if (t3_prep_adapter(adapter, ai, 1) < 0) {
+		err = -ENODEV;
+		goto out_free_dev;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for_each_port(adapter, i) {
+		err = register_netdev(adapter->port[i]);
+		if (err)
+			dev_warn(&pdev->dev,
+				 "cannot register net device %s, skipping\n",
+				 adapter->port[i]->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i]->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+		}
+	}
+	if (!adapter->registered_device_map) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto out_free_dev;
+	}
+
+	/* Driver's ready. Reflect it on LEDs */
+	t3_led_ready(adapter);
+
+	if (is_offload(adapter)) {
+		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+		cxgb3_adapter_ofld(adapter);
+	}
+
+	/* See what interrupts we'll be using */
+	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else if (msi > 0 && pci_enable_msi(pdev) == 0)
+		adapter->flags |= USING_MSI;
+
+	err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
+				 &cxgb3_attr_group);
+
+	print_port_info(adapter, ai);
+	return 0;
+
+out_free_dev:
+	iounmap(adapter->regs);
+	for (i = ai->nports - 1; i >= 0; --i)
+		if (adapter->port[i])
+			free_netdev(adapter->port[i]);
+
+out_free_adapter:
+	kfree(adapter);
+
+out_disable_device:
+	pci_disable_device(pdev);
+out_release_regions:
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		int i;
+		struct adapter *adapter = dev->priv;
+
+		t3_sge_stop(adapter);
+		sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
+				   &cxgb3_attr_group);
+
+		for_each_port(adapter, i)
+		    if (test_bit(i, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[i]);
+
+		if (is_offload(adapter)) {
+			cxgb3_adapter_unofld(adapter);
+			if (test_bit(OFFLOAD_DEVMAP_BIT,
+				     &adapter->open_device_map))
+				offload_close(&adapter->tdev);
+		}
+
+		t3_free_sge_resources(adapter);
+		cxgb_disable_msi(adapter);
+
+		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
+			if (adapter->dummy_netdev[i]) {
+				free_netdev(adapter->dummy_netdev[i]);
+				adapter->dummy_netdev[i] = NULL;
+			}
+
+		for_each_port(adapter, i)
+			if (adapter->port[i])
+				free_netdev(adapter->port[i]);
+
+		iounmap(adapter->regs);
+		kfree(adapter);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+	}
+}
+
+static struct pci_driver driver = {
+	.name = DRV_NAME,
+	.id_table = cxgb3_pci_tbl,
+	.probe = init_one,
+	.remove = __devexit_p(remove_one),
+};
+
+static int __init cxgb3_init_module(void)
+{
+	int ret;
+
+	cxgb3_offload_init();
+
+	ret = pci_register_driver(&driver);
+	return ret;
+}
+
+static void __exit cxgb3_cleanup_module(void)
+{
+	pci_unregister_driver(&driver);
+	if (cxgb3_wq)
+		destroy_workqueue(cxgb3_wq);
+}
+
+module_init(cxgb3_init_module);
+module_exit(cxgb3_cleanup_module);

drivers/net/cxgb3/cxgb3_offload.c

@@ -0,0 +1,1222 @@
+/*
+ * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <asm/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x100000;
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+	const struct adapter *adapter = tdev2adap(tdev);
+
+	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
+}
+
+/**
+ *	cxgb3_register_client - register an offload client
+ *	@client: the client
+ *
+ *	Add the client to the client list and call back the client for
+ *	each activated offload device.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_add_tail(&client->client_list, &client_list);
+
+	if (client->add) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->add(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_register_client);
+
+/**
+ *	cxgb3_unregister_client - unregister an offload client
+ *	@client: the client
+ *
+ *	Remove the client from the client list and call back the client for
+ *	each activated offload device.
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_del(&client->client_list);
+
+	if (client->remove) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->remove(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ *	cxgb3_add_clients - activate registered clients for an offload device
+ *	@tdev: the offload device
+ *
+ *	Calls back all registered clients when an offload device is activated.
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->add)
+			client->add(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ *	cxgb3_remove_clients - deactivates registered clients
+ *			       for an offload device
+ *	@tdev: the offload device
+ *
+ *	Calls back all registered clients when an offload device is deactivated.
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->remove)
+			client->remove(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+					   const unsigned char *mac,
+					   unsigned int vlan)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		const struct vlan_group *grp;
+		struct net_device *dev = adapter->port[i];
+		const struct port_info *p = netdev_priv(dev);
+
+		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+			if (vlan && vlan != VLAN_VID_MASK) {
+				grp = p->vlan_grp;
+				dev = grp ? grp->vlan_devices[vlan] : NULL;
+			} else
+				while (dev->master)
+					dev = dev->master;
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+			      void *data)
+{
+	int ret = 0;
+	struct ulp_iscsi_info *uiip = data;
+
+	switch (req) {
+	case ULP_ISCSI_GET_PARAMS:
+		uiip->pdev = adapter->pdev;
+		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+		/*
+		 * On tx, the iscsi pdu has to be <= tx page size and has to
+		 * fit into the Tx PM FIFO.
+		 */
+		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
+				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
+		/*
+		 * On rx, the iscsi pdu has to be < rx page size and the
+		 * whole pdu + cpl headers have to fit into one sge buffer.
+		 */
+		uiip->max_rxsz = min_t(unsigned int,
+				       adapter->params.tp.rx_pg_size,
+				       (adapter->sge.qs[0].fl[1].buf_size -
+					sizeof(struct cpl_rx_data) * 2 -
+					sizeof(struct cpl_rx_data_ddp)));
+		break;
+	case ULP_ISCSI_SET_PARAMS:
+		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+/* Response queue used for RDMA events. */
+#define ASYNC_NOTIF_RSPQ 0
+
+static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+{
+	int ret = 0;
+
+	switch (req) {
+	case RDMA_GET_PARAMS:{
+		struct rdma_info *req = data;
+		struct pci_dev *pdev = adapter->pdev;
+
+		req->udbell_physbase = pci_resource_start(pdev, 2);
+		req->udbell_len = pci_resource_len(pdev, 2);
+		req->tpt_base =
+			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+		req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+		req->pbl_base =
+			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+		req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+		req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+		req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+		req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+		req->pdev = pdev;
+		break;
+	}
+	case RDMA_CQ_OP:{
+		unsigned long flags;
+		struct rdma_cq_op *req = data;
+
+		/* may be called in any context */
+		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+		ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
+					req->credits);
+		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
+		break;
+	}
+	case RDMA_GET_MEM:{
+		struct ch_mem_range *t = data;
+		struct mc7 *mem;
+
+		if ((t->addr & 7) || (t->len & 7))
+			return -EINVAL;
+		if (t->mem_id == MEM_CM)
+			mem = &adapter->cm;
+		else if (t->mem_id == MEM_PMRX)
+			mem = &adapter->pmrx;
+		else if (t->mem_id == MEM_PMTX)
+			mem = &adapter->pmtx;
+		else
+			return -EINVAL;
+
+		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
+				     (u64 *)t->buf);
+		if (ret)
+			return ret;
+		break;
+	}
+	case RDMA_CQ_SETUP:{
+		struct rdma_cq_setup *req = data;
+
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr,
+					  req->size, ASYNC_NOTIF_RSPQ,
+					  req->ovfl_mode, req->credits,
+					  req->credit_thres);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	}
+	case RDMA_CQ_DISABLE:
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	case RDMA_CTRL_QP_SETUP:{
+		struct rdma_ctrlqp_setup *req = data;
+
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
+					 SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
+					 req->base_addr, req->size,
+					 FW_RI_TID_START, 1, 0);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	}
+	default:
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
+{
+	struct adapter *adapter = tdev2adap(tdev);
+	struct tid_range *tid;
+	struct mtutab *mtup;
+	struct iff_mac *iffmacp;
+	struct ddp_params *ddpp;
+	struct adap_ports *ports;
+	int i;
+
+	switch (req) {
+	case GET_MAX_OUTSTANDING_WR:
+		*(unsigned int *)data = FW_WR_NUM;
+		break;
+	case GET_WR_LEN:
+		*(unsigned int *)data = WR_FLITS;
+		break;
+	case GET_TX_MAX_CHUNK:
+		*(unsigned int *)data = 1 << 20;	/* 1MB */
+		break;
+	case GET_TID_RANGE:
+		tid = data;
+		tid->num = t3_mc5_size(&adapter->mc5) -
+		    adapter->params.mc5.nroutes -
+		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+		tid->base = 0;
+		break;
+	case GET_STID_RANGE:
+		tid = data;
+		tid->num = adapter->params.mc5.nservers;
+		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
+		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+		break;
+	case GET_L2T_CAPACITY:
+		*(unsigned int *)data = 2048;
+		break;
+	case GET_MTUS:
+		mtup = data;
+		mtup->size = NMTUS;
+		mtup->mtus = adapter->params.mtus;
+		break;
+	case GET_IFF_FROM_MAC:
+		iffmacp = data;
+		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
+						iffmacp->vlan_tag &
+						VLAN_VID_MASK);
+		break;
+	case GET_DDP_PARAMS:
+		ddpp = data;
+		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
+		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
+		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+		break;
+	case GET_PORTS:
+		ports = data;
+		ports->nports = adapter->params.nports;
+		for_each_port(adapter, i)
+			ports->lldevs[i] = adapter->port[i];
+		break;
+	case ULP_ISCSI_GET_PARAMS:
+	case ULP_ISCSI_SET_PARAMS:
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		return cxgb_ulp_iscsi_ctl(adapter, req, data);
+	case RDMA_GET_PARAMS:
+	case RDMA_CQ_OP:
+	case RDMA_CQ_SETUP:
+	case RDMA_CQ_DISABLE:
+	case RDMA_CTRL_QP_SETUP:
+	case RDMA_GET_MEM:
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		return cxgb_rdma_ctl(adapter, req, data);
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * Dummy handler for Rx offload packets in case we get an offload packet before
+ * proper processing is setup.  This complains and drops the packet as it isn't
+ * normal to get offload packets at this stage.
+ */
+static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
+				int n)
+{
+	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
+	       n, ntohl(*(u32 *)skbs[0]->data));
+	while (n--)
+		dev_kfree_skb_any(skbs[n]);
+	return 0;
+}
+
+static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+}
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev)
+{
+	dev->recv = rx_offload_blackhole;
+	dev->neigh_update = dummy_neigh_update;
+}
+
+/*
+ * Free an active-open TID.
+ */
+void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+	union active_open_entry *p = atid2entry(t, atid);
+	void *ctx = p->t3c_tid.ctx;
+
+	spin_lock_bh(&t->atid_lock);
+	p->next = t->afree;
+	t->afree = p;
+	t->atids_in_use--;
+	spin_unlock_bh(&t->atid_lock);
+
+	return ctx;
+}
+
+EXPORT_SYMBOL(cxgb3_free_atid);
+
+/*
+ * Free a server TID and return it to the free pool.
+ */
+void cxgb3_free_stid(struct t3cdev *tdev, int stid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+	union listen_entry *p = stid2entry(t, stid);
+
+	spin_lock_bh(&t->stid_lock);
+	p->next = t->sfree;
+	t->sfree = p;
+	t->stids_in_use--;
+	spin_unlock_bh(&t->stid_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_free_stid);
+
+void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
+		      void *ctx, unsigned int tid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	t->tid_tab[tid].client = client;
+	t->tid_tab[tid].ctx = ctx;
+	atomic_inc(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_insert_tid);
+
+/*
+ * Populate a TID_RELEASE WR.  The skb must be already properly sized.
+ */
+static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
+{
+	struct cpl_tid_release *req;
+
+	skb->priority = CPL_PRIORITY_SETUP;
+	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+static void t3_process_tid_release_list(struct work_struct *work)
+{
+	struct t3c_data *td = container_of(work, struct t3c_data,
+					   tid_release_task);
+	struct sk_buff *skb;
+	struct t3cdev *tdev = td->dev;
+
+	spin_lock_bh(&td->tid_release_lock);
+	while (td->tid_release_list) {
+		struct t3c_tid_entry *p = td->tid_release_list;
+
+		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
+		spin_unlock_bh(&td->tid_release_lock);
+
+		skb = alloc_skb(sizeof(struct cpl_tid_release),
+				GFP_KERNEL | __GFP_NOFAIL);
+		mk_tid_release(skb, p - td->tid_maps.tid_tab);
+		cxgb3_ofld_send(tdev, skb);
+		p->ctx = NULL;
+		spin_lock_bh(&td->tid_release_lock);
+	}
+	spin_unlock_bh(&td->tid_release_lock);
+}
+
+/* use ctx as a next pointer in the tid release list */
+void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
+{
+	struct t3c_data *td = T3C_DATA(tdev);
+	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
+
+	spin_lock_bh(&td->tid_release_lock);
+	p->ctx = (void *)td->tid_release_list;
+	td->tid_release_list = p;
+	if (!p->ctx)
+		schedule_work(&td->tid_release_task);
+	spin_unlock_bh(&td->tid_release_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_queue_tid_release);
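
The trick flagged by the comment above is that an entry waiting for release has no live context, so its ctx field doubles as the list's next pointer and the queueing path needs no allocation. A toy version of the idea, with hypothetical names:

/* Toy sketch (not driver code): a dead pointer field doubles as the
 * linkage of a singly-linked pending list while the entry is queued. */
struct toy_entry {
	void *ctx;		/* meaningless while queued for release */
};

static struct toy_entry *pending;	/* head of the release list */

static void toy_queue(struct toy_entry *p)
{
	p->ctx = pending;	/* link through the dead field */
	pending = p;
}

static struct toy_entry *toy_pop(void)
{
	struct toy_entry *p = pending;

	if (p) {
		pending = p->ctx;
		p->ctx = NULL;	/* the field regains its real meaning */
	}
	return p;
}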
+
+/*
+ * Remove a tid from the TID table.  A client may defer processing its last
+ * CPL message if it is locked at the time it arrives, and while the message
+ * sits in the client's backlog the TID may be reused for another connection.
+ * To handle this we atomically switch the TID association if it still points
+ * to the original client context.
+ */
+void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	BUG_ON(tid >= t->ntids);
+	if (tdev->type == T3A)
+		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
+	else {
+		struct sk_buff *skb;
+
+		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+		if (likely(skb)) {
+			mk_tid_release(skb, tid);
+			cxgb3_ofld_send(tdev, skb);
+			t->tid_tab[tid].ctx = NULL;
+		} else
+			cxgb3_queue_tid_release(tdev, tid);
+	}
+	atomic_dec(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_remove_tid);
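
The cmpxchg() in the T3A branch implements "clear the slot only if it is still ours": if the TID has already been recycled for a new connection, the compare fails and the new owner's context is left intact. The same idiom expressed with portable C11 atomics, purely as illustration:

#include <stdatomic.h>

static _Atomic(void *) tid_slot;	/* stand-in for tid_tab[tid].ctx */

/* Clears the slot only when it still holds our context; returns nonzero
 * on success.  A slot already reused by another owner is untouched. */
static int release_if_ours(void *ctx)
{
	void *expected = ctx;

	return atomic_compare_exchange_strong(&tid_slot, &expected, NULL);
}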
+
+int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
+		     void *ctx)
+{
+	int atid = -1;
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	spin_lock_bh(&t->atid_lock);
+	if (t->afree) {
+		union active_open_entry *p = t->afree;
+
+		atid = (p - t->atid_tab) + t->atid_base;
+		t->afree = p->next;
+		p->t3c_tid.ctx = ctx;
+		p->t3c_tid.client = client;
+		t->atids_in_use++;
+	}
+	spin_unlock_bh(&t->atid_lock);
+	return atid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_atid);
+
+int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
+		     void *ctx)
+{
+	int stid = -1;
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	spin_lock_bh(&t->stid_lock);
+	if (t->sfree) {
+		union listen_entry *p = t->sfree;
+
+		stid = (p - t->stid_tab) + t->stid_base;
+		t->sfree = p->next;
+		p->t3c_tid.ctx = ctx;
+		p->t3c_tid.client = client;
+		t->stids_in_use++;
+	}
+	spin_unlock_bh(&t->stid_lock);
+	return stid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_stid);
+
+static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_smt_write_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		printk(KERN_ERR
+		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+		       rpl->status, GET_TID(rpl));
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		printk(KERN_ERR
+		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+		       rpl->status, GET_TID(rpl));
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+	unsigned int atid = G_TID(ntohl(rpl->atid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
+		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
+								    t3c_tid->
+								    ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_ACT_OPEN_RPL);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int stid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode] (dev, skb,
+							     t3c_tid->ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, p->opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, p->opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_pass_accept_req *req = cplhdr(skb);
+	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
+		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_PASS_ACCEPT_REQ);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		struct cpl_abort_req_rss *req = cplhdr(skb);
+		struct cpl_abort_rpl *rpl;
+		struct sk_buff *reply_skb;
+
+		reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+				      GFP_ATOMIC);
+		if (!reply_skb) {
+			printk(KERN_ERR
+			       "do_abort_req_rss: couldn't get skb!\n");
+			goto out;
+		}
+		/* Build the reply in its own buffer rather than shadowing
+		 * the request skb. */
+		reply_skb->priority = CPL_PRIORITY_DATA;
+		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+		rpl = cplhdr(reply_skb);
+		rpl->wr.wr_hi =
+		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
+		OPCODE_TID(rpl) =
+		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
+		rpl->cmd = req->status;
+		cxgb3_ofld_send(dev, reply_skb);
+out:
+		return CPL_RET_BUF_DONE;
+	}
+}
+
+static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
+		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_ACT_ESTABLISH);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		printk(KERN_ERR
+		       "Unexpected SET_TCB_RPL status %u for tid %u\n",
+		       rpl->status, GET_TID(rpl));
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_trace_pkt *p = cplhdr(skb);
+
+	skb->protocol = 0xffff;
+	skb->dev = dev->lldev;
+	skb_pull(skb, sizeof(*p));
+	skb->mac.raw = skb->data;
+	netif_receive_skb(skb);
+	return 0;
+}
+
+static int do_term(struct t3cdev *dev, struct sk_buff *skb)
+{
+	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
+	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[opcode]) {
+		return t3c_tid->client->handlers[opcode] (dev, skb,
+							  t3c_tid->ctx);
+	} else {
+		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+		       dev->name, opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int nb_callback(struct notifier_block *self, unsigned long event,
+		       void *ctx)
+{
+	switch (event) {
+	case (NETEVENT_NEIGH_UPDATE):{
+		cxgb_neigh_update((struct neighbour *)ctx);
+		break;
+	}
+	case (NETEVENT_PMTU_UPDATE):
+		break;
+	case (NETEVENT_REDIRECT):{
+		struct netevent_redirect *nr = ctx;
+		cxgb_redirect(nr->old, nr->new);
+		cxgb_neigh_update(nr->new->neighbour);
+		break;
+	}
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = nb_callback
+};
+
+/*
+ * Process a received packet with an unknown/unexpected CPL opcode.
+ */
+static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
+	       *skb->data);
+	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+}
+
+/*
+ * Handlers for each CPL opcode
+ */
+static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+
+/*
+ * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
+ * to unregister an existing handler.
+ */
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
+{
+	if (opcode < NUM_CPL_CMDS)
+		cpl_handlers[opcode] = h ? h : do_bad_cpl;
+	else
+		printk(KERN_ERR "T3C: handler registration for "
+		       "opcode %x failed\n", opcode);
+}
+
+EXPORT_SYMBOL(t3_register_cpl_handler);
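
A client is expected to install its handlers at init time and pass NULL on teardown, which restores the do_bad_cpl() default. A hedged sketch of that usage (my_act_open_rpl is hypothetical):

static int my_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	/* ...consume the reply... */
	return CPL_RET_BUF_DONE;	/* tell process_rx() to free the skb */
}

static void my_client_init(void)
{
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, my_act_open_rpl);
}

static void my_client_exit(void)
{
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
}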
+
+/*
+ * T3CDEV's receive method.
+ */
+int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
+{
+	while (n--) {
+		struct sk_buff *skb = *skbs++;
+		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
+		int ret = cpl_handlers[opcode] (dev, skb);
+
+#if VALIDATE_TID
+		if (ret & CPL_RET_UNKNOWN_TID) {
+			union opcode_tid *p = cplhdr(skb);
+
+			printk(KERN_ERR "%s: CPL message (opcode %u) had "
+			       "unknown TID %u\n", dev->name, opcode,
+			       G_TID(ntohl(p->opcode_tid)));
+		}
+#endif
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+	}
+	return 0;
+}
+
+/*
+ * Sends an sk_buff to a T3C driver after dealing with any active network taps.
+ */
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
+{
+	int r;
+
+	local_bh_disable();
+	r = dev->send(dev, skb);
+	local_bh_enable();
+	return r;
+}
+
+EXPORT_SYMBOL(cxgb3_ofld_send);
+
+static int is_offloading(struct net_device *dev)
+{
+	struct adapter *adapter;
+	int i;
+
+	read_lock_bh(&adapter_list_lock);
+	list_for_each_entry(adapter, &adapter_list, adapter_list) {
+		for_each_port(adapter, i) {
+			if (dev == adapter->port[i]) {
+				read_unlock_bh(&adapter_list_lock);
+				return 1;
+			}
+		}
+	}
+	read_unlock_bh(&adapter_list_lock);
+	return 0;
+}
+
+void cxgb_neigh_update(struct neighbour *neigh)
+{
+	struct net_device *dev = neigh->dev;
+
+	if (dev && (is_offloading(dev))) {
+		struct t3cdev *tdev = T3CDEV(dev);
+
+		BUG_ON(!tdev);
+		t3_l2t_update(tdev, neigh);
+	}
+}
+
+static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
+{
+	struct sk_buff *skb;
+	struct cpl_set_tcb_field *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb) {
+		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
+		return;
+	}
+	skb->priority = CPL_PRIORITY_CONTROL;
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply = 0;
+	req->cpu_idx = 0;
+	req->word = htons(W_TCB_L2T_IX);
+	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
+	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
+	tdev->send(tdev, skb);
+}
+
+void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+{
+	struct net_device *olddev, *newdev;
+	struct tid_info *ti;
+	struct t3cdev *tdev;
+	u32 tid;
+	int update_tcb;
+	struct l2t_entry *e;
+	struct t3c_tid_entry *te;
+
+	olddev = old->neighbour->dev;
+	newdev = new->neighbour->dev;
+	if (!is_offloading(olddev))
+		return;
+	if (!is_offloading(newdev)) {
+		printk(KERN_WARNING "%s: Redirect to non-offload "
+		       "device ignored.\n", __FUNCTION__);
+		return;
+	}
+	tdev = T3CDEV(olddev);
+	BUG_ON(!tdev);
+	if (tdev != T3CDEV(newdev)) {
+		printk(KERN_WARNING "%s: Redirect to different "
+		       "offload device ignored.\n", __FUNCTION__);
+		return;
+	}
+
+	/* Add new L2T entry */
+	e = t3_l2t_get(tdev, new->neighbour, newdev);
+	if (!e) {
+		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
+		       __FUNCTION__);
+		return;
+	}
+
+	/* Walk tid table and notify clients of dst change. */
+	ti = &(T3C_DATA(tdev))->tid_maps;
+	for (tid = 0; tid < ti->ntids; tid++) {
+		te = lookup_tid(ti, tid);
+		BUG_ON(!te);
+		if (te->ctx && te->client && te->client->redirect) {
+			update_tcb = te->client->redirect(te->ctx, old, new, e);
+			if (update_tcb) {
+				l2t_hold(L2DATA(tdev), e);
+				set_l2t_ix(tdev, tid, e);
+			}
+		}
+	}
+	l2t_release(L2DATA(tdev), e);
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *cxgb_alloc_mem(unsigned long size)
+{
+	void *p = kmalloc(size, GFP_KERNEL);
+
+	if (!p)
+		p = vmalloc(size);
+	if (p)
+		memset(p, 0, size);
+	return p;
+}
+
+/*
+ * Free memory allocated through cxgb_alloc_mem().
+ */
+void cxgb_free_mem(void *addr)
+{
+	unsigned long p = (unsigned long)addr;
+
+	if (p >= VMALLOC_START && p < VMALLOC_END)
+		vfree(addr);
+	else
+		kfree(addr);
+}
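
The two helpers are a pair: only cxgb_free_mem() can tell from the address range whether a block came from kmalloc() or from the vmalloc() fallback, so memory from cxgb_alloc_mem() must never be released with plain kfree(). A hypothetical caller:

/* Illustrative only: large tables may silently take the vmalloc path,
 * so they must be freed by the matching helper. */
static u64 *alloc_big_table(unsigned long nentries)
{
	return cxgb_alloc_mem(nentries * sizeof(u64));	/* zeroed, or NULL */
}

static void free_big_table(u64 *tab)
{
	cxgb_free_mem(tab);	/* picks vfree() or kfree() by address */
}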
+
+/*
+ * Allocate and initialize the TID tables.  Returns 0 on success.
+ */
+static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
+			 unsigned int natids, unsigned int nstids,
+			 unsigned int atid_base, unsigned int stid_base)
+{
+	unsigned long size = ntids * sizeof(*t->tid_tab) +
+	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
+
+	t->tid_tab = cxgb_alloc_mem(size);
+	if (!t->tid_tab)
+		return -ENOMEM;
+
+	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
+	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
+	t->ntids = ntids;
+	t->nstids = nstids;
+	t->stid_base = stid_base;
+	t->sfree = NULL;
+	t->natids = natids;
+	t->atid_base = atid_base;
+	t->afree = NULL;
+	t->stids_in_use = t->atids_in_use = 0;
+	atomic_set(&t->tids_in_use, 0);
+	spin_lock_init(&t->stid_lock);
+	spin_lock_init(&t->atid_lock);
+
+	/*
+	 * Setup the free lists for stid_tab and atid_tab.
+	 */
+	if (nstids) {
+		while (--nstids)
+			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
+		t->sfree = t->stid_tab;
+	}
+	if (natids) {
+		while (--natids)
+			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+		t->afree = t->atid_tab;
+	}
+	return 0;
+}
+
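+/*
+ * Editor's note (illustrative, not part of the driver): the loops above
+ * thread the stid and atid tables into intrusive free lists, using the
+ * entries themselves as links.  Allocation then becomes a constant-time
+ * pop off the list head, roughly:
+ *
+ *	spin_lock_bh(&t->atid_lock);
+ *	p = t->afree;			// first free active-open entry
+ *	if (p) {
+ *		t->afree = p->next;	// unlink it
+ *		t->atids_in_use++;
+ *	}
+ *	spin_unlock_bh(&t->atid_lock);
+ */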
+static void free_tid_maps(struct tid_info *t)
+{
+	cxgb_free_mem(t->tid_tab);
+}
+
+static inline void add_adapter(struct adapter *adap)
+{
+	write_lock_bh(&adapter_list_lock);
+	list_add_tail(&adap->adapter_list, &adapter_list);
+	write_unlock_bh(&adapter_list_lock);
+}
+
+static inline void remove_adapter(struct adapter *adap)
+{
+	write_lock_bh(&adapter_list_lock);
+	list_del(&adap->adapter_list);
+	write_unlock_bh(&adapter_list_lock);
+}
+
+int cxgb3_offload_activate(struct adapter *adapter)
+{
+	struct t3cdev *dev = &adapter->tdev;
+	int natids, err;
+	struct t3c_data *t;
+	struct tid_range stid_range, tid_range;
+	struct mtutab mtutab;
+	unsigned int l2t_capacity;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	err = -EOPNOTSUPP;
+	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
+	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
+	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
+	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
+	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
+	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
+		goto out_free;
+
+	err = -ENOMEM;
+	L2DATA(dev) = t3_init_l2t(l2t_capacity);
+	if (!L2DATA(dev))
+		goto out_free;
+
+	natids = min(tid_range.num / 2, MAX_ATIDS);
+	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
+			    stid_range.num, ATID_BASE, stid_range.base);
+	if (err)
+		goto out_free_l2t;
+
+	t->mtus = mtutab.mtus;
+	t->nmtus = mtutab.size;
+
+	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
+	spin_lock_init(&t->tid_release_lock);
+	INIT_LIST_HEAD(&t->list_node);
+	t->dev = dev;
+
+	T3C_DATA(dev) = t;
+	dev->recv = process_rx;
+	dev->neigh_update = t3_l2t_update;
+
+	/* Register netevent handler once */
+	if (list_empty(&adapter_list))
+		register_netevent_notifier(&nb);
+
+	add_adapter(adapter);
+	return 0;
+
+out_free_l2t:
+	t3_free_l2t(L2DATA(dev));
+	L2DATA(dev) = NULL;
+out_free:
+	kfree(t);
+	return err;
+}
+
+void cxgb3_offload_deactivate(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+	struct t3c_data *t = T3C_DATA(tdev);
+
+	remove_adapter(adapter);
+	if (list_empty(&adapter_list))
+		unregister_netevent_notifier(&nb);
+
+	free_tid_maps(&t->tid_maps);
+	T3C_DATA(tdev) = NULL;
+	t3_free_l2t(L2DATA(tdev));
+	L2DATA(tdev) = NULL;
+	kfree(t);
+}
+
+static inline void register_tdev(struct t3cdev *tdev)
+{
+	static int unit;
+
+	mutex_lock(&cxgb3_db_lock);
+	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
+	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline void unregister_tdev(struct t3cdev *tdev)
+{
+	mutex_lock(&cxgb3_db_lock);
+	list_del(&tdev->ofld_dev_list);
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+
+	INIT_LIST_HEAD(&tdev->ofld_dev_list);
+
+	cxgb3_set_dummy_ops(tdev);
+	tdev->send = t3_offload_tx;
+	tdev->ctl = cxgb_offload_ctl;
+	tdev->type = adapter->params.rev == 0 ? T3A : T3B;
+
+	register_tdev(tdev);
+}
+
+void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+
+	tdev->recv = NULL;
+	tdev->neigh_update = NULL;
+
+	unregister_tdev(tdev);
+}
+
+void __init cxgb3_offload_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_CPL_CMDS; ++i)
+		cpl_handlers[i] = do_bad_cpl;
+
+	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
+	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
+	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
+	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
+	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
+	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
+	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
+	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
+	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+}

+ 193 - 0
drivers/net/cxgb3/cxgb3_offload.h

@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_H
+#define _CXGB3_OFFLOAD_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+
+#include "l2t.h"
+
+#include "t3cdev.h"
+#include "t3_cpl.h"
+
+struct adapter;
+
+void cxgb3_offload_init(void);
+
+void cxgb3_adapter_ofld(struct adapter *adapter);
+void cxgb3_adapter_unofld(struct adapter *adapter);
+int cxgb3_offload_activate(struct adapter *adapter);
+void cxgb3_offload_deactivate(struct adapter *adapter);
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev);
+
+/*
+ * Client registration.  Users of the T3 driver must register themselves.
+ * The T3 driver will call the add function of every client for each T3
+ * adapter activated, passing up the t3cdev ptr.  Each client fills out an
+ * array of callback functions to process CPL messages.
+ */
+
+void cxgb3_register_client(struct cxgb3_client *client);
+void cxgb3_unregister_client(struct cxgb3_client *client);
+void cxgb3_add_clients(struct t3cdev *tdev);
+void cxgb3_remove_clients(struct t3cdev *tdev);
+
+typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
+				      struct sk_buff *skb, void *ctx);
+
+struct cxgb3_client {
+	char *name;
+	void (*add) (struct t3cdev *);
+	void (*remove) (struct t3cdev *);
+	cxgb3_cpl_handler_func *handlers;
+	int (*redirect)(void *ctx, struct dst_entry *old,
+			struct dst_entry *new, struct l2t_entry *l2t);
+	struct list_head client_list;
+};
+
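+/*
+ * Illustrative sketch (editor's example, not part of this header): a
+ * minimal client fills in the callbacks above and registers itself; the
+ * names my_add, my_remove and my_handlers are hypothetical.
+ *
+ *	static struct cxgb3_client my_client = {
+ *		.name     = "my_client",
+ *		.add      = my_add,		// called per activated adapter
+ *		.remove   = my_remove,
+ *		.handlers = my_handlers,	// indexed by CPL opcode
+ *	};
+ *
+ *	cxgb3_register_client(&my_client);
+ */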
+/*
+ * TID allocation services.
+ */
+int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
+		     void *ctx);
+int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
+		     void *ctx);
+void *cxgb3_free_atid(struct t3cdev *dev, int atid);
+void cxgb3_free_stid(struct t3cdev *dev, int stid);
+void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
+		      void *ctx, unsigned int tid);
+void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
+void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
+
+struct t3c_tid_entry {
+	struct cxgb3_client *client;
+	void *ctx;
+};
+
+/* CPL message priority levels */
+enum {
+	CPL_PRIORITY_DATA = 0,	/* data messages */
+	CPL_PRIORITY_SETUP = 1,	/* connection setup messages */
+	CPL_PRIORITY_TEARDOWN = 0,	/* connection teardown messages */
+	CPL_PRIORITY_LISTEN = 1,	/* listen start/stop messages */
+	CPL_PRIORITY_ACK = 1,	/* RX ACK messages */
+	CPL_PRIORITY_CONTROL = 1	/* offload control messages */
+};
+
+/* Flags for return value of CPL message handlers */
+enum {
+	CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
+	CPL_RET_BAD_MSG = 2,  /* bad CPL message (e.g., unknown opcode) */
+	CPL_RET_UNKNOWN_TID = 4	/* unexpected unknown TID */
+};
+
+typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
+
+/*
+ * Returns a pointer to the first byte of the CPL header in an sk_buff that
+ * contains a CPL message.
+ */
+static inline void *cplhdr(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
+
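+/*
+ * Illustrative sketch (editor's example): a per-opcode handler returns the
+ * CPL_RET_* flags above to tell the dispatcher how the message was handled.
+ * The name do_my_rpl is hypothetical.
+ *
+ *	static int do_my_rpl(struct t3cdev *dev, struct sk_buff *skb)
+ *	{
+ *		struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+ *
+ *		if (rpl->status)
+ *			return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ *		return CPL_RET_BUF_DONE;	// caller may free the skb
+ *	}
+ *
+ *	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_my_rpl);
+ */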
+union listen_entry {
+	struct t3c_tid_entry t3c_tid;
+	union listen_entry *next;
+};
+
+union active_open_entry {
+	struct t3c_tid_entry t3c_tid;
+	union active_open_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of the TID, server TID,
+ * and active-open TID tables for an offload device.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+	struct t3c_tid_entry *tid_tab;
+	unsigned int ntids;
+	atomic_t tids_in_use;
+
+	union listen_entry *stid_tab;
+	unsigned int nstids;
+	unsigned int stid_base;
+
+	union active_open_entry *atid_tab;
+	unsigned int natids;
+	unsigned int atid_base;
+
+	/*
+	 * The following members are accessed R/W so we put them in their own
+	 * cache lines.
+	 *
+	 * XXX We could combine the atid fields above with the lock here since
+	 * atids are used once (unlike other tids).  OTOH the above fields are
+	 * usually in cache due to tid_tab.
+	 */
+	spinlock_t atid_lock ____cacheline_aligned_in_smp;
+	union active_open_entry *afree;
+	unsigned int atids_in_use;
+
+	spinlock_t stid_lock ____cacheline_aligned;
+	union listen_entry *sfree;
+	unsigned int stids_in_use;
+};
+
+struct t3c_data {
+	struct list_head list_node;
+	struct t3cdev *dev;
+	unsigned int tx_max_chunk;	/* max payload for TX_DATA */
+	unsigned int max_wrs;	/* max in-flight WRs per connection */
+	unsigned int nmtus;
+	const unsigned short *mtus;
+	struct tid_info tid_maps;
+
+	struct t3c_tid_entry *tid_release_list;
+	spinlock_t tid_release_lock;
+	struct work_struct tid_release_task;
+};
+
+/*
+ * t3cdev -> t3c_data accessor
+ */
+#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
+
+#endif

+ 177 - 0
drivers/net/cxgb3/firmware_exports.h

@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FIRMWARE_EXPORTS_H_
+#define _FIRMWARE_EXPORTS_H_
+
+/* WR OPCODES supported by the firmware.
+ */
+#define	FW_WROPCODE_FORWARD			0x01
+#define FW_WROPCODE_BYPASS			0x05
+
+#define FW_WROPCODE_TUNNEL_TX_PKT		0x03
+
+#define FW_WROPOCDE_ULPTX_DATA_SGL		0x00
+#define FW_WROPCODE_ULPTX_MEM_READ		0x02
+#define FW_WROPCODE_ULPTX_PKT			0x04
+#define FW_WROPCODE_ULPTX_INVALIDATE		0x06
+
+#define FW_WROPCODE_TUNNEL_RX_PKT		0x07
+
+#define FW_WROPCODE_OFLD_GETTCB_RPL		0x08
+#define FW_WROPCODE_OFLD_CLOSE_CON		0x09
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ	0x0A
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL	0x0F
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ	0x0B
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL	0x0C
+#define FW_WROPCODE_OFLD_TX_DATA		0x0D
+#define FW_WROPCODE_OFLD_TX_DATA_ACK		0x0E
+
+#define FW_WROPCODE_RI_RDMA_INIT		0x10
+#define FW_WROPCODE_RI_RDMA_WRITE		0x11
+#define FW_WROPCODE_RI_RDMA_READ_REQ		0x12
+#define FW_WROPCODE_RI_RDMA_READ_RESP		0x13
+#define FW_WROPCODE_RI_SEND			0x14
+#define FW_WROPCODE_RI_TERMINATE		0x15
+#define FW_WROPCODE_RI_RDMA_READ		0x16
+#define FW_WROPCODE_RI_RECEIVE			0x17
+#define FW_WROPCODE_RI_BIND_MW			0x18
+#define FW_WROPCODE_RI_FASTREGISTER_MR		0x19
+#define FW_WROPCODE_RI_LOCAL_INV		0x1A
+#define FW_WROPCODE_RI_MODIFY_QP		0x1B
+#define FW_WROPCODE_RI_BYPASS			0x1C
+
+#define FW_WROPOCDE_RSVD			0x1E
+
+#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR	0x1F
+
+#define FW_WROPCODE_MNGT			0x1D
+#define FW_MNGTOPCODE_PKTSCHED_SET		0x00
+
+/* Maximum size of a WR sent from the host, limited by the SGE. 
+ *
+ * Note: WRs coming from ULP or TP are limited only by the CIM.
+ */
+#define FW_WR_SIZE			128
+
+/* Maximum number of outstanding WRs sent from the host. Value must be
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by 
+ * offload modules to limit the number of WRs per connection.
+ */
+#define FW_T3_WR_NUM			16
+#define FW_N3_WR_NUM			7
+
+#ifndef N3
+# define FW_WR_NUM			FW_T3_WR_NUM
+#else
+# define FW_WR_NUM			FW_N3_WR_NUM
+#endif
+
+/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+ * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+ * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+ *
+ * Ingress Traffic (e.g. DMA completion credit)  for TUNNEL Queue[i] is sent 
+ * to RESP Queue[i].
+ */
+#define FW_TUNNEL_NUM			8
+#define FW_TUNNEL_SGEEC_START		8
+#define FW_TUNNEL_TID_START		65544
+
+/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+ * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_CTRL_TID_START.
+ *
+ * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_CTRL_NUM			8
+#define FW_CTRL_SGEEC_START		65528
+#define FW_CTRL_TID_START		65536
+
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These 
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START. 
+ * 
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for 
+ * OFFLOAD Queues, as the host is responsible for providing the correct TID in
+ * every WR.
+ *
+ * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_OFLD_NUM			8
+#define FW_OFLD_SGEEC_START		0
+
+/* FW_RI_NUM corresponds to the number of supported RI Queues.  These queues
+ * must start at SGE Egress Context FW_RI_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_RI_TID_START.
+ */
+#define FW_RI_NUM			1
+#define FW_RI_SGEEC_START		65527
+#define FW_RI_TID_START			65552
+
+/* FW_RX_PKT_NUM corresponds to the number of supported RX_PKT Queues; their
+ * 'TID's (or 'uP Tokens') start at FW_RX_PKT_TID_START.
+ */
+#define FW_RX_PKT_NUM			1
+#define FW_RX_PKT_TID_START		65553
+
+/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
+ * by the firmware.
+ */
+#define FW_WRC_NUM			\
+    (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
+
+/*
+ * FW type and version.
+ */
+#define S_FW_VERSION_TYPE		28
+#define M_FW_VERSION_TYPE		0xF
+#define V_FW_VERSION_TYPE(x)		((x) << S_FW_VERSION_TYPE)
+#define G_FW_VERSION_TYPE(x)		\
+    (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
+
+#define S_FW_VERSION_MAJOR		16
+#define M_FW_VERSION_MAJOR		0xFFF
+#define V_FW_VERSION_MAJOR(x)		((x) << S_FW_VERSION_MAJOR)
+#define G_FW_VERSION_MAJOR(x)		\
+    (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
+
+#define S_FW_VERSION_MINOR		8
+#define M_FW_VERSION_MINOR		0xFF
+#define V_FW_VERSION_MINOR(x)		((x) << S_FW_VERSION_MINOR)
+#define G_FW_VERSION_MINOR(x)		\
+    (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
+
+#define S_FW_VERSION_MICRO		0
+#define M_FW_VERSION_MICRO		0xFF
+#define V_FW_VERSION_MICRO(x)		((x) << S_FW_VERSION_MICRO)
+#define G_FW_VERSION_MICRO(x)		\
+    (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
+
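+/*
+ * Editor's illustrative sketch: the macros above pack a firmware
+ * type/major/minor/micro tuple into a single 32-bit word, e.g.
+ *
+ *	u32 v = V_FW_VERSION_TYPE(1) | V_FW_VERSION_MAJOR(3) |
+ *		V_FW_VERSION_MINOR(2) | V_FW_VERSION_MICRO(8);
+ *
+ * after which G_FW_VERSION_MAJOR(v) == 3, G_FW_VERSION_MINOR(v) == 2,
+ * and so on.
+ */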
+#endif				/* _FIRMWARE_EXPORTS_H_ */

+ 450 - 0
drivers/net/cxgb3/l2t.c

@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <net/neighbour.h>
+#include "common.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define VLAN_NONE 0xfff
+
+/*
+ * Module locking notes:  There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
+ * under the protection of the table lock, individual entry changes happen
+ * while holding that entry's spinlock.  The table lock nests outside the
+ * entry locks.  Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries.  Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel.  An L2T entry can be dropped by decrementing its reference count
+ * and therefore can happen in parallel with entry allocation but no entry
+ * can change state or increment its ref count during allocation as both of
+ * these perform lookups.
+ */
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+	return e->vlan >> 13;
+}
+
+static inline unsigned int arp_hash(u32 key, int ifindex,
+				    const struct l2t_data *d)
+{
+	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
+}
+
+static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+	neigh_hold(n);
+	if (e->neigh)
+		neigh_release(e->neigh);
+	e->neigh = n;
+}
+
+/*
+ * Set up an L2T entry and send any packets waiting in the arp queue.  The
+ * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
+ * entry locked.
+ */
+static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
+				  struct l2t_entry *e)
+{
+	struct cpl_l2t_write_req *req;
+
+	if (!skb) {
+		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+		if (!skb)
+			return -ENOMEM;
+	}
+
+	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+			    V_L2T_W_PRIO(vlan_prio(e)));
+	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+	skb->priority = CPL_PRIORITY_CONTROL;
+	cxgb3_ofld_send(dev, skb);
+	while (e->arpq_head) {
+		skb = e->arpq_head;
+		e->arpq_head = skb->next;
+		skb->next = NULL;
+		cxgb3_ofld_send(dev, skb);
+	}
+	e->arpq_tail = NULL;
+	e->state = L2T_STATE_VALID;
+
+	return 0;
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+	skb->next = NULL;
+	if (e->arpq_head)
+		e->arpq_tail->next = skb;
+	else
+		e->arpq_head = skb;
+	e->arpq_tail = skb;
+}
+
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+		     struct l2t_entry *e)
+{
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE)
+			e->state = L2T_STATE_VALID;
+		spin_unlock_bh(&e->lock);
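+		/* fall through: the entry is now valid, take the fast path */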
+	case L2T_STATE_VALID:	/* fast-path, send the packet on */
+		return cxgb3_ofld_send(dev, skb);
+	case L2T_STATE_RESOLVING:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_RESOLVING) {
+			/* ARP already completed */
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		arpq_enqueue(e, skb);
+		spin_unlock_bh(&e->lock);
+
+		/*
+		 * Only the first packet added to the arpq should kick off
+		 * resolution.  However, because the alloc_skb below can fail,
+		 * we allow each packet added to the arpq to retry resolution
+		 * as a way of recovering from transient memory exhaustion.
+		 * A better way would be to use a work request to retry L2T
+		 * entries when there's no memory.
+		 */
+		if (!neigh_event_send(e->neigh, NULL)) {
+			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+					GFP_ATOMIC);
+			if (!skb)
+				break;
+
+			spin_lock_bh(&e->lock);
+			if (e->arpq_head)
+				setup_l2e_send_pending(dev, skb, e);
+			else	/* we lost the race */
+				__kfree_skb(skb);
+			spin_unlock_bh(&e->lock);
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(t3_l2t_send_slow);
+
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
+{
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE) {
+			e->state = L2T_STATE_VALID;
+		}
+		spin_unlock_bh(&e->lock);
+		return;
+	case L2T_STATE_VALID:	/* fast-path, send the packet on */
+		return;
+	case L2T_STATE_RESOLVING:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_RESOLVING) {
+			/* ARP already completed */
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		spin_unlock_bh(&e->lock);
+
+		/*
+		 * Only the first packet added to the arpq should kick off
+		 * resolution.  However, because the alloc_skb below can fail,
+		 * we allow each packet added to the arpq to retry resolution
+		 * as a way of recovering from transient memory exhaustion.
+		 * A better way would be to use a work request to retry L2T
+		 * entries when there's no memory.
+		 */
+		neigh_event_send(e->neigh, NULL);
+	}
+	return;
+}
+
+EXPORT_SYMBOL(t3_l2t_send_event);
+
+/*
+ * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+	struct l2t_entry *end, *e, **p;
+
+	if (!atomic_read(&d->nfree))
+		return NULL;
+
+	/* there's definitely a free entry */
+	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
+		if (atomic_read(&e->refcnt) == 0)
+			goto found;
+
+	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
+found:
+	d->rover = e + 1;
+	atomic_dec(&d->nfree);
+
+	/*
+	 * The entry we found may be an inactive entry that is
+	 * presently in the hash table.  We need to remove it.
+	 */
+	if (e->state != L2T_STATE_UNUSED) {
+		int hash = arp_hash(e->addr, e->ifindex, d);
+
+		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
+			if (*p == e) {
+				*p = e->next;
+				break;
+			}
+		e->state = L2T_STATE_UNUSED;
+	}
+	return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.  The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor.  We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
+{
+	spin_lock_bh(&e->lock);
+	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
+		if (e->neigh) {
+			neigh_release(e->neigh);
+			e->neigh = NULL;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	atomic_inc(&d->nfree);
+}
+
+EXPORT_SYMBOL(t3_l2e_free);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+	unsigned int nud_state;
+
+	spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
+
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+	nud_state = neigh->nud_state;
+	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+	    !(nud_state & NUD_VALID))
+		e->state = L2T_STATE_RESOLVING;
+	else if (nud_state & NUD_CONNECTED)
+		e->state = L2T_STATE_VALID;
+	else
+		e->state = L2T_STATE_STALE;
+	spin_unlock(&e->lock);
+}
+
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+			     struct net_device *dev)
+{
+	struct l2t_entry *e;
+	struct l2t_data *d = L2DATA(cdev);
+	u32 addr = *(u32 *) neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = arp_hash(addr, ifidx, d);
+	struct port_info *p = netdev_priv(dev);
+	int smt_idx = p->port_id;
+
+	write_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (e->addr == addr && e->ifindex == ifidx &&
+		    e->smt_idx == smt_idx) {
+			l2t_hold(d, e);
+			if (atomic_read(&e->refcnt) == 1)
+				reuse_entry(e, neigh);
+			goto done;
+		}
+
+	/* Need to allocate a new entry */
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
+		e->next = d->l2tab[hash].first;
+		d->l2tab[hash].first = e;
+		e->state = L2T_STATE_RESOLVING;
+		e->addr = addr;
+		e->ifindex = ifidx;
+		e->smt_idx = smt_idx;
+		atomic_set(&e->refcnt, 1);
+		neigh_replace(e, neigh);
+		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+			e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
+		else
+			e->vlan = VLAN_NONE;
+		spin_unlock(&e->lock);
+	}
+done:
+	write_unlock_bh(&d->lock);
+	return e;
+}
+
+EXPORT_SYMBOL(t3_l2t_get);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head.  If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the offload device.
+ *
+ * XXX: maybe we should abandon the latter behavior and just require a failure
+ * handler.
+ */
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
+{
+	while (arpq) {
+		struct sk_buff *skb = arpq;
+		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+		arpq = skb->next;
+		skb->next = NULL;
+		if (cb->arp_failure_handler)
+			cb->arp_failure_handler(dev, skb);
+		else
+			cxgb3_ofld_send(dev, skb);
+	}
+}
+
+/*
+ * Called when the host's ARP layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+	struct l2t_entry *e;
+	struct sk_buff *arpq = NULL;
+	struct l2t_data *d = L2DATA(dev);
+	u32 addr = *(u32 *) neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = arp_hash(addr, ifidx, d);
+
+	read_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (e->addr == addr && e->ifindex == ifidx) {
+			spin_lock(&e->lock);
+			goto found;
+		}
+	read_unlock_bh(&d->lock);
+	return;
+
+found:
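+	/* keep BHs off: e->lock was taken under read_lock_bh and is
+	 * released with spin_unlock_bh below, hence plain read_unlock here */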
+	read_unlock(&d->lock);
+	if (atomic_read(&e->refcnt)) {
+		if (neigh != e->neigh)
+			neigh_replace(e, neigh);
+
+		if (e->state == L2T_STATE_RESOLVING) {
+			if (neigh->nud_state & NUD_FAILED) {
+				arpq = e->arpq_head;
+				e->arpq_head = e->arpq_tail = NULL;
+			} else if (neigh_is_connected(neigh))
+				setup_l2e_send_pending(dev, NULL, e);
+		} else {
+			e->state = neigh_is_connected(neigh) ?
+			    L2T_STATE_VALID : L2T_STATE_STALE;
+			if (memcmp(e->dmac, neigh->ha, 6))
+				setup_l2e_send_pending(dev, NULL, e);
+		}
+	}
+	spin_unlock_bh(&e->lock);
+
+	if (arpq)
+		handle_failed_resolution(dev, arpq);
+}
+
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
+{
+	struct l2t_data *d;
+	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+
+	d = cxgb_alloc_mem(size);
+	if (!d)
+		return NULL;
+
+	d->nentries = l2t_capacity;
+	d->rover = &d->l2tab[1];	/* entry 0 is not used */
+	atomic_set(&d->nfree, l2t_capacity - 1);
+	rwlock_init(&d->lock);
+
+	for (i = 0; i < l2t_capacity; ++i) {
+		d->l2tab[i].idx = i;
+		d->l2tab[i].state = L2T_STATE_UNUSED;
+		spin_lock_init(&d->l2tab[i].lock);
+		atomic_set(&d->l2tab[i].refcnt, 0);
+	}
+	return d;
+}
+
+void t3_free_l2t(struct l2t_data *d)
+{
+	cxgb_free_mem(d);
+}
+

+ 143 - 0
drivers/net/cxgb3/l2t.h

@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_L2T_H
+#define _CHELSIO_L2T_H
+
+#include <linux/spinlock.h>
+#include "t3cdev.h"
+#include <asm/atomic.h>
+
+enum {
+	L2T_STATE_VALID,	/* entry is up to date */
+	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,	/* entry needs address resolution */
+	L2T_STATE_UNUSED	/* entry not in use */
+};
+
+struct neighbour;
+struct sk_buff;
+
+/*
+ * Each L2T entry plays multiple roles.  First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution.  Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer.  Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+	u16 state;		/* entry state */
+	u16 idx;		/* entry index */
+	u32 addr;		/* dest IP address */
+	int ifindex;		/* neighbor's net_device's ifindex */
+	u16 smt_idx;		/* SMT index */
+	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+	struct neighbour *neigh;	/* associated neighbour */
+	struct l2t_entry *first;	/* start of hash chain */
+	struct l2t_entry *next;	/* next l2t_entry on chain */
+	struct sk_buff *arpq_head;	/* queue of packets awaiting resolution */
+	struct sk_buff *arpq_tail;
+	spinlock_t lock;
+	atomic_t refcnt;	/* entry reference count */
+	u8 dmac[6];		/* neighbour's MAC address */
+};
+
+struct l2t_data {
+	unsigned int nentries;	/* number of entries */
+	struct l2t_entry *rover;	/* starting point for next allocation */
+	atomic_t nfree;		/* number of free entries */
+	rwlock_t lock;
+	struct l2t_entry l2tab[0];
+};
+
+typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
+					 struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+					   arp_failure_handler_func hnd)
+{
+	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
+
+/*
+ * Getting to the L2 data from an offload device.
+ */
+#define L2DATA(dev) ((dev)->l2opt)
+
+#define W_TCB_L2T_IX    0
+#define S_TCB_L2T_IX    7
+#define M_TCB_L2T_IX    0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+			     struct net_device *dev);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+		     struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+void t3_free_l2t(struct l2t_data *d);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+			   struct l2t_entry *e)
+{
+	if (likely(e->state == L2T_STATE_VALID))
+		return cxgb3_ofld_send(dev, skb);
+	return t3_l2t_send_slow(dev, skb, e);
+}
+
+static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_dec_and_test(&e->refcnt))
+		t3_l2e_free(d, e);
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
+		atomic_dec(&d->nfree);
+}
+
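+/*
+ * Illustrative usage sketch (editor's example, not part of this header):
+ * an offload transmit path resolves the next hop once, then rides the
+ * fast path in l2t_send() above; my_arp_fail is a hypothetical handler.
+ *
+ *	struct l2t_entry *e = t3_l2t_get(tdev, neigh, neigh->dev);
+ *	if (e) {
+ *		set_arp_failure_handler(skb, my_arp_fail);
+ *		l2t_send(tdev, skb, e);
+ *		...
+ *		l2t_release(L2DATA(tdev), e);	// drop the reference when done
+ *	}
+ */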
+#endif

+ 473 - 0
drivers/net/cxgb3/mc5.c

@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+	IDT75P52100 = 4,
+	IDT75N43102 = 5
+};
+
+/* DBGI command mode */
+enum {
+	DBGI_MODE_MBUS = 0,
+	DBGI_MODE_IDT52100 = 5
+};
+
+/* IDT 75P52100 commands */
+#define IDT_CMD_READ   0
+#define IDT_CMD_WRITE  1
+#define IDT_CMD_SEARCH 2
+#define IDT_CMD_LEARN  3
+
+/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
+#define IDT_LAR_ADR0   	0x180006
+#define IDT_LAR_MODE144	0xffff0000
+
+/* IDT SCR and SSR addresses (low 32 bits) */
+#define IDT_SCR_ADR0  0x180000
+#define IDT_SSR0_ADR0 0x180002
+#define IDT_SSR1_ADR0 0x180004
+
+/* IDT GMR base address (low 32 bits) */
+#define IDT_GMR_BASE_ADR0 0x180020
+
+/* IDT data and mask array base addresses (low 32 bits) */
+#define IDT_DATARY_BASE_ADR0 0
+#define IDT_MSKARY_BASE_ADR0 0x80000
+
+/* IDT 75N43102 commands */
+#define IDT4_CMD_SEARCH144 3
+#define IDT4_CMD_WRITE     4
+#define IDT4_CMD_READ      5
+
+/* IDT 75N43102 SCR address (low 32 bits) */
+#define IDT4_SCR_ADR0  0x3
+
+/* IDT 75N43102 GMR base addresses (low 32 bits) */
+#define IDT4_GMR_BASE0 0x10
+#define IDT4_GMR_BASE1 0x20
+#define IDT4_GMR_BASE2 0x30
+
+/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
+#define IDT4_DATARY_BASE_ADR0 0x1000000
+#define IDT4_MSKARY_BASE_ADR0 0x2000000
+
+#define MAX_WRITE_ATTEMPTS 5
+
+#define MAX_ROUTES 2048
+
+/*
+ * Issue a command to the TCAM and wait for its completion.  The address and
+ * any data required by the command must have been setup by the caller.
+ */
+static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
+	return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
+			       F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
+}
+
+static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
+				 u32 v3)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
+}
+
+static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
+				 u32 v3)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
+}
+
+static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
+				u32 *v3)
+{
+	*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
+	*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
+	*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
+}
+
+/*
+ * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
+ * command cmd.  The data to be written must have been set up by the caller.
+ * Returns -1 on failure, 0 on success.
+ */
+static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
+	if (mc5_cmd_write(adapter, cmd) == 0)
+		return 0;
+	CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
+	       addr_lo);
+	return -1;
+}
+
+static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
+				u32 data_array_base, u32 write_cmd,
+				int addr_shift)
+{
+	unsigned int i;
+	struct adapter *adap = mc5->adapter;
+
+	/*
+	 * We need the size of the TCAM data and mask arrays in terms of
+	 * 72-bit entries.
+	 */
+	unsigned int size72 = mc5->tcam_size;
+	unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
+
+	if (mc5->mode == MC5_MODE_144_BIT) {
+		size72 *= 2;	/* 1 144-bit entry is 2 72-bit entries */
+		server_base *= 2;
+	}
+
+	/* Clear the data array */
+	dbgi_wr_data3(adap, 0, 0, 0);
+	for (i = 0; i < size72; i++)
+		if (mc5_write(adap, data_array_base + (i << addr_shift),
+			      write_cmd))
+			return -1;
+
+	/* Initialize the mask array. */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+	for (i = 0; i < size72; i++) {
+		if (i == server_base)	/* entering server or routing region */
+			t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
+				     mc5->mode == MC5_MODE_144_BIT ?
+				     0xfffffff9 : 0xfffffffd);
+		if (mc5_write(adap, mask_array_base + (i << addr_shift),
+			      write_cmd))
+			return -1;
+	}
+	return 0;
+}
+
+static int init_idt52100(struct mc5 *mc5)
+{
+	int i;
+	struct adapter *adap = mc5->adapter;
+
+	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+		     V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
+
+	/*
+	 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
+	 * GMRs 8-9 for ACK- and AOPEN searches.
+	 */
+	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
+	t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
+	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
+
+	/* Set DBGI command mode for IDT TCAM. */
+	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+	/* Set up LAR */
+	dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
+	if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	/* Set up SSRs */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
+	if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
+	    mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	/* Set up GMRs */
+	for (i = 0; i < 32; ++i) {
+		if (i >= 12 && i < 15)
+			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+		else if (i == 15)
+			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+		else
+			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+
+		if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
+			goto err;
+	}
+
+	/* Set up SCR */
+	dbgi_wr_data3(adap, 1, 0, 0);
+	if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
+				    IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
+err:
+	return -EIO;
+}
+
+static int init_idt43102(struct mc5 *mc5)
+{
+	int i;
+	struct adapter *adap = mc5->adapter;
+
+	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+		     adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
+		     V_RDLAT(0xd) | V_SRCHLAT(0x12));
+
+	/*
+	 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
+	 * for ACK- and AOPEN searches.
+	 */
+	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
+		     IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
+	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
+	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
+
+	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
+
+	/* Set DBGI command mode for IDT TCAM. */
+	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+	/* Set up GMRs */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+	for (i = 0; i < 7; ++i)
+		if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
+			goto err;
+
+	for (i = 0; i < 4; ++i)
+		if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
+			goto err;
+
+	dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+	if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
+	    mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
+	    mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
+		goto err;
+
+	dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+	if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
+		goto err;
+
+	/* Set up SCR */
+	dbgi_wr_data3(adap, 0xf0000000, 0, 0);
+	if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
+		goto err;
+
+	return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
+				    IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
+err:
+	return -EIO;
+}
+
+/* Put MC5 in DBGI mode. */
+static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
+{
+	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+}
+
+/* Put MC5 in M-Bus mode. */
+static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
+{
+	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
+		     V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
+		     V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+}
+
+/*
+ * Initialization that requires the OS and protocol layers to already
+ * be initialized goes here.
+ */
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+		unsigned int nroutes)
+{
+	u32 cfg;
+	int err;
+	unsigned int tcam_size = mc5->tcam_size;
+	struct adapter *adap = mc5->adapter;
+
+	if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
+		return -EINVAL;
+
+	/* Reset the TCAM */
+	cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
+	cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
+	t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+	if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
+		CH_ERR(adap, "TCAM reset timed out\n");
+		return -1;
+	}
+
+	t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
+	t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
+		     tcam_size - nroutes - nfilters);
+	t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
+		     tcam_size - nroutes - nfilters - nservers);
+
+	mc5->parity_enabled = 1;
+
+	/* All the TCAM addresses we access have only the low 32 bits non-zero */
+	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
+	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
+
+	mc5_dbgi_mode_enable(mc5);
+
+	switch (mc5->part_type) {
+	case IDT75P52100:
+		err = init_idt52100(mc5);
+		break;
+	case IDT75N43102:
+		err = init_idt43102(mc5);
+		break;
+	default:
+		CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
+		err = -EINVAL;
+		break;
+	}
+
+	mc5_dbgi_mode_disable(mc5);
+	return err;
+}
+
+/*
+ *	t3_read_mc5_range - dump a part of the memory managed by MC5
+ *	@mc5: the MC5 handle
+ *	@start: the start address for the dump
+ *	@n: number of 72-bit words to read
+ *	@buf: result buffer
+ *
+ *	Read n 72-bit words from MC5 memory from the given start location.
+ */
+int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
+		      unsigned int n, u32 *buf)
+{
+	u32 read_cmd;
+	int err = 0;
+	struct adapter *adap = mc5->adapter;
+
+	if (mc5->part_type == IDT75P52100)
+		read_cmd = IDT_CMD_READ;
+	else if (mc5->part_type == IDT75N43102)
+		read_cmd = IDT4_CMD_READ;
+	else
+		return -EINVAL;
+
+	mc5_dbgi_mode_enable(mc5);
+
+	while (n--) {
+		t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
+		if (mc5_cmd_write(adap, read_cmd)) {
+			err = -EIO;
+			break;
+		}
+		dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
+		buf += 3;
+	}
+
+	mc5_dbgi_mode_disable(mc5);
+	return err;
+}
+
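+/*
+ * Editor's illustrative sketch: each 72-bit word is returned as three u32s,
+ * so a caller reading n words supplies a buffer of 3 * n elements, e.g.
+ *
+ *	u32 buf[3 * 4];
+ *	if (t3_read_mc5_range(mc5, 0, 4, buf) == 0)
+ *		...	// buf[0..2] hold the first 72-bit word read
+ */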
+#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
+
+/*
+ * MC5 interrupt handler
+ */
+void t3_mc5_intr_handler(struct mc5 *mc5)
+{
+	struct adapter *adap = mc5->adapter;
+	u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
+
+	if ((cause & F_PARITYERR) && mc5->parity_enabled) {
+		CH_ALERT(adap, "MC5 parity error\n");
+		mc5->stats.parity_err++;
+	}
+
+	if (cause & F_REQQPARERR) {
+		CH_ALERT(adap, "MC5 request queue parity error\n");
+		mc5->stats.reqq_parity_err++;
+	}
+
+	if (cause & F_DISPQPARERR) {
+		CH_ALERT(adap, "MC5 dispatch queue parity error\n");
+		mc5->stats.dispq_parity_err++;
+	}
+
+	if (cause & F_ACTRGNFULL)
+		mc5->stats.active_rgn_full++;
+	if (cause & F_NFASRCHFAIL)
+		mc5->stats.nfa_srch_err++;
+	if (cause & F_UNKNOWNCMD)
+		mc5->stats.unknown_cmd++;
+	if (cause & F_DELACTEMPTY)
+		mc5->stats.del_act_empty++;
+	if (cause & MC5_INT_FATAL)
+		t3_fatal_err(adap);
+
+	t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
+}
+
+void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+{
+#define K * 1024
+
+	static unsigned int tcam_part_size[] = {	/* in K 72-bit entries */
+		64 K, 128 K, 256 K, 32 K
+	};
+
+#undef K
+
+	u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
+
+	mc5->adapter = adapter;
+	mc5->mode = (unsigned char)mode;
+	mc5->part_type = (unsigned char)G_TMTYPE(cfg);
+	if (cfg & F_TMTYPEHI)
+		mc5->part_type |= 4;
+
+	mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
+	if (mode == MC5_MODE_144_BIT)
+		mc5->tcam_size /= 2;
+}

+ 2195 - 0
drivers/net/cxgb3/regs.h

@@ -0,0 +1,2195 @@
+#define A_SG_CONTROL 0x0
+
+#define S_DROPPKT    20
+#define V_DROPPKT(x) ((x) << S_DROPPKT)
+#define F_DROPPKT    V_DROPPKT(1U)
+
+#define S_EGRGENCTRL    19
+#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
+#define F_EGRGENCTRL    V_EGRGENCTRL(1U)
+
+#define S_USERSPACESIZE    14
+#define M_USERSPACESIZE    0x1f
+#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+
+#define S_HOSTPAGESIZE    11
+#define M_HOSTPAGESIZE    0x7
+#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+
+#define S_FLMODE    9
+#define V_FLMODE(x) ((x) << S_FLMODE)
+#define F_FLMODE    V_FLMODE(1U)
+
+#define S_PKTSHIFT    6
+#define M_PKTSHIFT    0x7
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+
+#define S_ONEINTMULTQ    5
+#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
+#define F_ONEINTMULTQ    V_ONEINTMULTQ(1U)
+
+#define S_BIGENDIANINGRESS    2
+#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
+#define F_BIGENDIANINGRESS    V_BIGENDIANINGRESS(1U)
+
+#define S_ISCSICOALESCING    1
+#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
+#define F_ISCSICOALESCING    V_ISCSICOALESCING(1U)
+
+#define S_GLOBALENABLE    0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE    V_GLOBALENABLE(1U)
+
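+/*
+ * Editor's note (illustrative, not part of this file): the register fields
+ * in this header follow one convention -- S_<FIELD> is the bit offset,
+ * M_<FIELD> the unshifted mask, V_<FIELD>(x) shifts a value into place,
+ * F_<FIELD> is the one-bit flag form, and G_<FIELD>(x), where present,
+ * extracts a field.  For example:
+ *
+ *	u32 ctrl = V_PKTSHIFT(2) | F_FLMODE | F_GLOBALENABLE;
+ *	unsigned int shift = (ctrl >> S_PKTSHIFT) & M_PKTSHIFT;   // == 2
+ */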
+#define S_AVOIDCQOVFL    24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL    V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ    23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ    V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL    22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL    V_CQCRDTCTRL(1U)
+
+#define A_SG_KDOORBELL 0x4
+
+#define S_SELEGRCNTX    31
+#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
+#define F_SELEGRCNTX    V_SELEGRCNTX(1U)
+
+#define S_EGRCNTX    0
+#define M_EGRCNTX    0xffff
+#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+
+#define A_SG_GTS 0x8
+
+#define S_RSPQ    29
+#define M_RSPQ    0x7
+#define V_RSPQ(x) ((x) << S_RSPQ)
+#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
+
+#define S_NEWTIMER    16
+#define M_NEWTIMER    0x1fff
+#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+
+#define S_NEWINDEX    0
+#define M_NEWINDEX    0xffff
+#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+
+#define A_SG_CONTEXT_CMD 0xc
+
+#define S_CONTEXT_CMD_OPCODE    28
+#define M_CONTEXT_CMD_OPCODE    0xf
+#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+
+#define S_CONTEXT_CMD_BUSY    27
+#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
+#define F_CONTEXT_CMD_BUSY    V_CONTEXT_CMD_BUSY(1U)
+
+#define S_CQ_CREDIT    20
+
+#define M_CQ_CREDIT    0x7f
+
+#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
+
+#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
+
+#define S_CQ    19
+
+#define V_CQ(x) ((x) << S_CQ)
+#define F_CQ    V_CQ(1U)
+
+#define S_RESPONSEQ    18
+#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
+#define F_RESPONSEQ    V_RESPONSEQ(1U)
+
+#define S_EGRESS    17
+#define V_EGRESS(x) ((x) << S_EGRESS)
+#define F_EGRESS    V_EGRESS(1U)
+
+#define S_FREELIST    16
+#define V_FREELIST(x) ((x) << S_FREELIST)
+#define F_FREELIST    V_FREELIST(1U)
+
+#define S_CONTEXT    0
+#define M_CONTEXT    0xffff
+#define V_CONTEXT(x) ((x) << S_CONTEXT)
+
+#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
+
+#define A_SG_CONTEXT_DATA0 0x10
+
+#define A_SG_CONTEXT_DATA1 0x14
+
+#define A_SG_CONTEXT_DATA2 0x18
+
+#define A_SG_CONTEXT_DATA3 0x1c
+
+#define A_SG_CONTEXT_MASK0 0x20
+
+#define A_SG_CONTEXT_MASK1 0x24
+
+#define A_SG_CONTEXT_MASK2 0x28
+
+#define A_SG_CONTEXT_MASK3 0x2c
+
+#define A_SG_RSPQ_CREDIT_RETURN 0x30
+
+#define S_CREDITS    0
+#define M_CREDITS    0xffff
+#define V_CREDITS(x) ((x) << S_CREDITS)
+
+#define A_SG_DATA_INTR 0x34
+
+#define S_ERRINTR    31
+#define V_ERRINTR(x) ((x) << S_ERRINTR)
+#define F_ERRINTR    V_ERRINTR(1U)
+
+#define A_SG_HI_DRB_HI_THRSH 0x38
+
+#define A_SG_HI_DRB_LO_THRSH 0x3c
+
+#define A_SG_LO_DRB_HI_THRSH 0x40
+
+#define A_SG_LO_DRB_LO_THRSH 0x44
+
+#define A_SG_RSPQ_FL_STATUS 0x4c
+
+#define S_RSPQ0DISABLED    8
+
+#define A_SG_EGR_RCQ_DRB_THRSH 0x54
+
+#define S_HIRCQDRBTHRSH    16
+#define M_HIRCQDRBTHRSH    0x7ff
+#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+
+#define S_LORCQDRBTHRSH    0
+#define M_LORCQDRBTHRSH    0x7ff
+#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+
+#define A_SG_EGR_CNTX_BADDR 0x58
+
+#define A_SG_INT_CAUSE 0x5c
+
+#define S_RSPQDISABLED    3
+#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
+#define F_RSPQDISABLED    V_RSPQDISABLED(1U)
+
+#define S_RSPQCREDITOVERFOW    2
+#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
+#define F_RSPQCREDITOVERFOW    V_RSPQCREDITOVERFOW(1U)
+
+#define A_SG_INT_ENABLE 0x60
+
+#define A_SG_CMDQ_CREDIT_TH 0x64
+
+#define S_TIMEOUT    8
+#define M_TIMEOUT    0xffffff
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+
+#define S_THRESHOLD    0
+#define M_THRESHOLD    0xff
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+
+#define A_SG_TIMER_TICK 0x68
+
+#define A_SG_CQ_CONTEXT_BADDR 0x6c
+
+#define A_SG_OCO_BASE 0x70
+
+#define S_BASE1    16
+#define M_BASE1    0xffff
+#define V_BASE1(x) ((x) << S_BASE1)
+
+#define A_SG_DRB_PRI_THRESH 0x74
+
+#define A_PCIX_INT_ENABLE 0x80
+
+#define S_MSIXPARERR    22
+#define M_MSIXPARERR    0x7
+
+#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+
+#define S_CFPARERR    18
+#define M_CFPARERR    0xf
+
+#define V_CFPARERR(x) ((x) << S_CFPARERR)
+
+#define S_RFPARERR    14
+#define M_RFPARERR    0xf
+
+#define V_RFPARERR(x) ((x) << S_RFPARERR)
+
+#define S_WFPARERR    12
+#define M_WFPARERR    0x3
+
+#define V_WFPARERR(x) ((x) << S_WFPARERR)
+
+#define S_PIOPARERR    11
+#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
+#define F_PIOPARERR    V_PIOPARERR(1U)
+
+#define S_DETUNCECCERR    10
+#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
+#define F_DETUNCECCERR    V_DETUNCECCERR(1U)
+
+#define S_DETCORECCERR    9
+#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
+#define F_DETCORECCERR    V_DETCORECCERR(1U)
+
+#define S_RCVSPLCMPERR    8
+#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
+#define F_RCVSPLCMPERR    V_RCVSPLCMPERR(1U)
+
+#define S_UNXSPLCMP    7
+#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
+#define F_UNXSPLCMP    V_UNXSPLCMP(1U)
+
+#define S_SPLCMPDIS    6
+#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
+#define F_SPLCMPDIS    V_SPLCMPDIS(1U)
+
+#define S_DETPARERR    5
+#define V_DETPARERR(x) ((x) << S_DETPARERR)
+#define F_DETPARERR    V_DETPARERR(1U)
+
+#define S_SIGSYSERR    4
+#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
+#define F_SIGSYSERR    V_SIGSYSERR(1U)
+
+#define S_RCVMSTABT    3
+#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
+#define F_RCVMSTABT    V_RCVMSTABT(1U)
+
+#define S_RCVTARABT    2
+#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
+#define F_RCVTARABT    V_RCVTARABT(1U)
+
+#define S_SIGTARABT    1
+#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
+#define F_SIGTARABT    V_SIGTARABT(1U)
+
+#define S_MSTDETPARERR    0
+#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
+#define F_MSTDETPARERR    V_MSTDETPARERR(1U)
+
+#define A_PCIX_INT_CAUSE 0x84
+
+#define A_PCIX_CFG 0x88
+
+#define S_CLIDECEN    18
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN    V_CLIDECEN(1U)
+
+#define A_PCIX_MODE 0x8c
+
+#define S_PCLKRANGE    6
+#define M_PCLKRANGE    0x3
+#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
+#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
+
+#define S_PCIXINITPAT    2
+#define M_PCIXINITPAT    0xf
+#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
+#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+
+#define S_64BIT    0
+#define V_64BIT(x) ((x) << S_64BIT)
+#define F_64BIT    V_64BIT(1U)
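+
+/*
+ * Every field in this file follows the same accessor pattern: S_<FIELD> is
+ * the bit offset, M_<FIELD> the right-justified mask, V_<FIELD>(x) positions
+ * a value within the field, G_<FIELD>(x) extracts one, and F_<FIELD> is the
+ * mask of a single-bit field.  A sketch of a read-modify-write of PCLKRANGE,
+ * assuming the driver's usual t3_read_reg()/t3_write_reg() accessors:
+ *
+ *	u32 v = t3_read_reg(adapter, A_PCIX_MODE);
+ *	v = (v & ~V_PCLKRANGE(M_PCLKRANGE)) | V_PCLKRANGE(1);
+ *	t3_write_reg(adapter, A_PCIX_MODE, v);
+ */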
+
+#define A_PCIE_INT_ENABLE 0x80
+
+#define S_BISTERR    15
+#define M_BISTERR    0xff
+#define V_BISTERR(x) ((x) << S_BISTERR)
+
+#define S_PCIE_MSIXPARERR    12
+#define M_PCIE_MSIXPARERR    0x7
+#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+
+#define S_PCIE_CFPARERR    11
+#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
+#define F_PCIE_CFPARERR    V_PCIE_CFPARERR(1U)
+
+#define S_PCIE_RFPARERR    10
+#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
+#define F_PCIE_RFPARERR    V_PCIE_RFPARERR(1U)
+
+#define S_PCIE_WFPARERR    9
+#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
+#define F_PCIE_WFPARERR    V_PCIE_WFPARERR(1U)
+
+#define S_PCIE_PIOPARERR    8
+#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
+#define F_PCIE_PIOPARERR    V_PCIE_PIOPARERR(1U)
+
+#define S_UNXSPLCPLERRC    7
+#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
+#define F_UNXSPLCPLERRC    V_UNXSPLCPLERRC(1U)
+
+#define S_UNXSPLCPLERRR    6
+#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
+#define F_UNXSPLCPLERRR    V_UNXSPLCPLERRR(1U)
+
+#define S_PEXERR    0
+#define V_PEXERR(x) ((x) << S_PEXERR)
+#define F_PEXERR    V_PEXERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x84
+
+#define A_PCIE_CFG 0x88
+
+#define S_PCIE_CLIDECEN    16
+#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+#define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
+
+#define S_CRSTWRMMODE    0
+#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
+#define F_CRSTWRMMODE    V_CRSTWRMMODE(1U)
+
+#define A_PCIE_MODE 0x8c
+
+#define S_NUMFSTTRNSEQRX    10
+#define M_NUMFSTTRNSEQRX    0xff
+#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
+#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+
+#define A_PCIE_PEX_CTRL0 0x98
+
+#define S_NUMFSTTRNSEQ    22
+#define M_NUMFSTTRNSEQ    0xff
+#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
+#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
+
+#define S_REPLAYLMT    2
+#define M_REPLAYLMT    0xfffff
+#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+
+#define A_PCIE_PEX_CTRL1 0x9c
+
+#define S_T3A_ACKLAT    0
+#define M_T3A_ACKLAT    0x7ff
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+
+#define S_ACKLAT    0
+#define M_ACKLAT    0x1fff
+#define V_ACKLAT(x) ((x) << S_ACKLAT)
+
+#define A_PCIE_PEX_ERR 0xa4
+
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN    27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN    V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN    26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN    V_GPIO10_OEN(1U)
+
+#define S_GPIO7_OEN    23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN    V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN    22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN    V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN    21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN    V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN    20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN    V_GPIO4_OEN(1U)
+
+#define S_GPIO2_OEN    18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN    V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN    17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN    V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN    16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN    V_GPIO0_OEN(1U)
+
+#define S_GPIO10_OUT_VAL    10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL    V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL    7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL    V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL    6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL    V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL    5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL    V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL    4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL    V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL    2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL    V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL    1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL    V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL    0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL    V_GPIO0_OUT_VAL(1U)
+
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_GPIO11    11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11    V_GPIO11(1U)
+
+#define S_GPIO10    10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10    V_GPIO10(1U)
+
+#define S_GPIO7    7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7    V_GPIO7(1U)
+
+#define S_GPIO6    6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6    V_GPIO6(1U)
+
+#define S_GPIO5    5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5    V_GPIO5(1U)
+
+#define S_GPIO4    4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4    V_GPIO4(1U)
+
+#define S_GPIO3    3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3    V_GPIO3(1U)
+
+#define S_GPIO2    2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2    V_GPIO2(1U)
+
+#define S_GPIO1    1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1    V_GPIO1(1U)
+
+#define S_GPIO0    0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0    V_GPIO0(1U)
+
+#define A_T3DBG_INT_CAUSE 0xdc
+
+#define A_T3DBG_GPIO_ACT_LOW 0xf0
+
+#define MC7_PMRX_BASE_ADDR 0x100
+
+#define A_MC7_CFG 0x100
+
+#define S_IFEN    13
+#define V_IFEN(x) ((x) << S_IFEN)
+#define F_IFEN    V_IFEN(1U)
+
+#define S_TERM150    11
+#define V_TERM150(x) ((x) << S_TERM150)
+#define F_TERM150    V_TERM150(1U)
+
+#define S_SLOW    10
+#define V_SLOW(x) ((x) << S_SLOW)
+#define F_SLOW    V_SLOW(1U)
+
+#define S_WIDTH    8
+#define M_WIDTH    0x3
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_BKS    6
+#define V_BKS(x) ((x) << S_BKS)
+#define F_BKS    V_BKS(1U)
+
+#define S_ORG    5
+#define V_ORG(x) ((x) << S_ORG)
+#define F_ORG    V_ORG(1U)
+
+#define S_DEN    2
+#define M_DEN    0x7
+#define V_DEN(x) ((x) << S_DEN)
+#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
+
+#define S_RDY    1
+#define V_RDY(x) ((x) << S_RDY)
+#define F_RDY    V_RDY(1U)
+
+#define S_CLKEN    0
+#define V_CLKEN(x) ((x) << S_CLKEN)
+#define F_CLKEN    V_CLKEN(1U)
+
+#define A_MC7_MODE 0x104
+
+#define S_BUSY    31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY    V_BUSY(1U)
+
+#define A_MC7_EXT_MODE1 0x108
+
+#define A_MC7_EXT_MODE2 0x10c
+
+#define A_MC7_EXT_MODE3 0x110
+
+#define A_MC7_PRE 0x114
+
+#define A_MC7_REF 0x118
+
+#define S_PREREFDIV    1
+#define M_PREREFDIV    0x3fff
+#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+
+#define S_PERREFEN    0
+#define V_PERREFEN(x) ((x) << S_PERREFEN)
+#define F_PERREFEN    V_PERREFEN(1U)
+
+#define A_MC7_DLL 0x11c
+
+#define S_DLLENB    1
+#define V_DLLENB(x) ((x) << S_DLLENB)
+#define F_DLLENB    V_DLLENB(1U)
+
+#define S_DLLRST    0
+#define V_DLLRST(x) ((x) << S_DLLRST)
+#define F_DLLRST    V_DLLRST(1U)
+
+#define A_MC7_PARM 0x120
+
+#define S_ACTTOPREDLY    26
+#define M_ACTTOPREDLY    0xf
+#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+
+#define S_ACTTORDWRDLY    23
+#define M_ACTTORDWRDLY    0x7
+#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+
+#define S_PRECYC    20
+#define M_PRECYC    0x7
+#define V_PRECYC(x) ((x) << S_PRECYC)
+
+#define S_REFCYC    13
+#define M_REFCYC    0x7f
+#define V_REFCYC(x) ((x) << S_REFCYC)
+
+#define S_BKCYC    8
+#define M_BKCYC    0x1f
+#define V_BKCYC(x) ((x) << S_BKCYC)
+
+#define S_WRTORDDLY    4
+#define M_WRTORDDLY    0xf
+#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+
+#define S_RDTOWRDLY    0
+#define M_RDTOWRDLY    0xf
+#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+
+#define A_MC7_CAL 0x128
+
+#define S_BUSY    31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY    V_BUSY(1U)
+
+#define S_CAL_FAULT    30
+#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
+#define F_CAL_FAULT    V_CAL_FAULT(1U)
+
+#define S_SGL_CAL_EN    20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN    V_SGL_CAL_EN(1U)
+
+#define A_MC7_ERR_ADDR 0x12c
+
+#define A_MC7_ECC 0x130
+
+#define S_ECCCHKEN    1
+#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
+#define F_ECCCHKEN    V_ECCCHKEN(1U)
+
+#define S_ECCGENEN    0
+#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
+#define F_ECCGENEN    V_ECCGENEN(1U)
+
+#define A_MC7_CE_ADDR 0x134
+
+#define A_MC7_CE_DATA0 0x138
+
+#define A_MC7_CE_DATA1 0x13c
+
+#define A_MC7_CE_DATA2 0x140
+
+#define S_DATA    0
+#define M_DATA    0xff
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
+#define A_MC7_UE_ADDR 0x144
+
+#define A_MC7_UE_DATA0 0x148
+
+#define A_MC7_UE_DATA1 0x14c
+
+#define A_MC7_UE_DATA2 0x150
+
+#define A_MC7_BD_ADDR 0x154
+
+#define S_ADDR    3
+#define M_ADDR    0x1fffffff
+
+#define A_MC7_BD_DATA0 0x158
+
+#define A_MC7_BD_DATA1 0x15c
+
+#define A_MC7_BD_OP 0x164
+
+#define S_OP    0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP    V_OP(1U)
+
+#define A_MC7_BIST_ADDR_BEG 0x168
+
+#define A_MC7_BIST_ADDR_END 0x16c
+
+#define A_MC7_BIST_DATA 0x170
+
+#define A_MC7_BIST_OP 0x174
+
+#define S_CONT    3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT    V_CONT(1U)
+
+#define A_MC7_INT_ENABLE 0x178
+
+#define S_AE    17
+#define V_AE(x) ((x) << S_AE)
+#define F_AE    V_AE(1U)
+
+#define S_PE    2
+#define M_PE    0x7fff
+#define V_PE(x) ((x) << S_PE)
+#define G_PE(x) (((x) >> S_PE) & M_PE)
+
+#define S_UE    1
+#define V_UE(x) ((x) << S_UE)
+#define F_UE    V_UE(1U)
+
+#define S_CE    0
+#define V_CE(x) ((x) << S_CE)
+#define F_CE    V_CE(1U)
+
+#define A_MC7_INT_CAUSE 0x17c
+
+#define MC7_PMTX_BASE_ADDR 0x180
+
+#define MC7_CM_BASE_ADDR 0x200
+
+#define A_CIM_BOOT_CFG 0x280
+
+#define S_BOOTADDR    2
+#define M_BOOTADDR    0x3fffffff
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x28c
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x290
+
+#define A_CIM_HOST_INT_ENABLE 0x298
+
+#define A_CIM_HOST_INT_CAUSE 0x29c
+
+#define S_BLKWRPLINT    12
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT    V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT    11
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT    V_BLKRDPLINT(1U)
+
+#define S_BLKWRCTLINT    10
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT    V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT    9
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT    V_BLKRDCTLINT(1U)
+
+#define S_BLKWRFLASHINT    8
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT    V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT    7
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT    V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT    6
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT    V_SGLWRFLASHINT(1U)
+
+#define S_WRBLKFLASHINT    5
+#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
+#define F_WRBLKFLASHINT    V_WRBLKFLASHINT(1U)
+
+#define S_BLKWRBOOTINT    4
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT    V_BLKWRBOOTINT(1U)
+
+#define S_FLASHRANGEINT    2
+#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
+#define F_FLASHRANGEINT    V_FLASHRANGEINT(1U)
+
+#define S_SDRAMRANGEINT    1
+#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
+#define F_SDRAMRANGEINT    V_SDRAMRANGEINT(1U)
+
+#define S_RSVDSPACEINT    0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT    V_RSVDSPACEINT(1U)
+
+#define A_CIM_HOST_ACC_CTRL 0x2b0
+
+#define S_HOSTBUSY    17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY    V_HOSTBUSY(1U)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_NICMODE    14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE    V_NICMODE(1U)
+
+#define S_IPV6ENABLE    15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE    V_IPV6ENABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_VLANEXTRACTIONENABLE    12
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_TXPACINGENABLE    24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE    V_TXPACINGENABLE(1U)
+
+#define S_PATHMTU    15
+#define V_PATHMTU(x) ((x) << S_PATHMTU)
+#define F_PATHMTU    V_PATHMTU(1U)
+
+#define S_IPCHECKSUMOFFLOAD    13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD    V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD    12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD    V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD    11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD    V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_IPTTL    0
+#define M_IPTTL    0xff
+#define V_IPTTL(x) ((x) << S_IPTTL)
+
+#define A_TP_CMM_MM_BASE 0x314
+
+#define A_TP_CMM_TIMER_BASE 0x318
+
+#define S_CMTIMERMAXNUM    28
+#define M_CMTIMERMAXNUM    0x3
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+
+#define A_TP_PMM_SIZE 0x31c
+
+#define A_TP_PMM_TX_BASE 0x320
+
+#define A_TP_PMM_RX_BASE 0x328
+
+#define A_TP_PMM_RX_PAGE_SIZE 0x32c
+
+#define A_TP_PMM_RX_MAX_PAGE 0x330
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
+
+#define A_TP_PMM_TX_MAX_PAGE 0x338
+
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_MTUDEFAULT    16
+#define M_MTUDEFAULT    0xffff
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+
+#define S_MTUENABLE    10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE    V_MTUENABLE(1U)
+
+#define S_SACKRX    8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX    V_SACKRX(1U)
+
+#define S_SACKMODE    4
+#define M_SACKMODE    0x3
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+
+#define S_WINDOWSCALEMODE    2
+#define M_WINDOWSCALEMODE    0x3
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE    0
+#define M_TIMESTAMPSMODE    0x3
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_AUTOSTATE3    30
+#define M_AUTOSTATE3    0x3
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+
+#define S_AUTOSTATE2    28
+#define M_AUTOSTATE2    0x3
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+
+#define S_AUTOSTATE1    26
+#define M_AUTOSTATE1    0x3
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD    5
+#define M_BYTETHRESHOLD    0xfffff
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD    3
+#define M_MSSTHRESHOLD    0x3
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL    2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL    V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE    1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE    V_AUTOENABLE(1U)
+
+#define S_DACK_MODE    0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE    V_DACK_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TXTOSQUEUEMAPMODE    26
+#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
+#define F_TXTOSQUEUEMAPMODE    V_TXTOSQUEUEMAPMODE(1U)
+
+#define S_ENABLEEPCMDAFULL    23
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL    V_ENABLEEPCMDAFULL(1U)
+
+#define S_MODULATEUNIONMODE    22
+#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
+#define F_MODULATEUNIONMODE    V_MODULATEUNIONMODE(1U)
+
+#define S_TXDEFERENABLE    20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE    V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE    19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE    V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATDACK    16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK    V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE    15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE    V_TXCONGESTIONMODE(1U)
+
+#define S_ENABLEOCSPIFULL    30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL    V_ENABLEOCSPIFULL(1U)
+
+#define S_LOCKTID    28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID    V_LOCKTID(1U)
+
+#define A_TP_PC_CONFIG2 0x34c
+
+#define S_CHDRAFULL    4
+#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
+#define F_CHDRAFULL    V_CHDRAFULL(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x350
+
+#define A_TP_TCP_BACKOFF_REG1 0x354
+
+#define A_TP_TCP_BACKOFF_REG2 0x358
+
+#define A_TP_TCP_BACKOFF_REG3 0x35c
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_MAXRXDATA    16
+#define M_MAXRXDATA    0xffff
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+
+#define S_RXCOALESCESIZE    0
+#define M_RXCOALESCESIZE    0xffff
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_TXDATAACKIDX    16
+#define M_TXDATAACKIDX    0xf
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+
+#define S_TXPACEAUTOSTRICT    10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT    V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED    9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED    V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO    8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO    V_TXPACEAUTO(1U)
+
+#define S_RXCOALESCEENABLE    1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE    V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN    0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN    V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_T3A_ENABLEESND    13
+#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
+#define F_T3A_ENABLEESND    V_T3A_ENABLEESND(1U)
+
+#define S_ENABLEESND    11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND    V_ENABLEESND(1U)
+
+#define A_TP_PARA_REG7 0x37c
+
+#define S_PMMAXXFERLEN1    16
+#define M_PMMAXXFERLEN1    0xffff
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0    0
+#define M_PMMAXXFERLEN0    0xffff
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_TIMERRESOLUTION    16
+#define M_TIMERRESOLUTION    0xff
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION    8
+#define M_TIMESTAMPRESOLUTION    0xff
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION    0
+#define M_DELAYEDACKRESOLUTION    0xff
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x394
+
+#define A_TP_RXT_MIN 0x398
+
+#define A_TP_RXT_MAX 0x39c
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define A_TP_KEEP_IDLE 0x3a8
+
+#define A_TP_KEEP_INTVL 0x3ac
+
+#define A_TP_INIT_SRTT 0x3b0
+
+#define A_TP_DACK_TIMER 0x3b4
+
+#define A_TP_FINWAIT2_TIMER 0x3b8
+
+#define A_TP_SHIFT_CNT 0x3c0
+
+#define S_SYNSHIFTMAX    24
+#define M_SYNSHIFTMAX    0xff
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1    20
+#define M_RXTSHIFTMAXR1    0xf
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2    16
+#define M_RXTSHIFTMAXR2    0xf
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX    12
+#define M_PERSHIFTBACKOFFMAX    0xf
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX    8
+#define M_PERSHIFTMAX    0xf
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAX    0
+#define M_KEEPALIVEMAX    0xff
+#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+
+#define A_TP_MTU_PORT_TABLE 0x3d0
+
+#define A_TP_CCTRL_TABLE 0x3dc
+
+#define A_TP_MTU_TABLE 0x3e4
+
+#define A_TP_RSS_MAP_TABLE 0x3e8
+
+#define A_TP_RSS_LKP_TABLE 0x3ec
+
+#define A_TP_RSS_CONFIG 0x3f0
+
+#define S_TNL4TUPEN    29
+#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
+#define F_TNL4TUPEN    V_TNL4TUPEN(1U)
+
+#define S_TNL2TUPEN    28
+#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
+#define F_TNL2TUPEN    V_TNL2TUPEN(1U)
+
+#define S_TNLPRTEN    26
+#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
+#define F_TNLPRTEN    V_TNLPRTEN(1U)
+
+#define S_TNLMAPEN    25
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN    V_TNLMAPEN(1U)
+
+#define S_TNLLKPEN    24
+#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
+#define F_TNLLKPEN    V_TNLLKPEN(1U)
+
+#define S_RRCPLCPUSIZE    4
+#define M_RRCPLCPUSIZE    0x7
+#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+
+#define S_RQFEEDBACKENABLE    3
+#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
+#define F_RQFEEDBACKENABLE    V_RQFEEDBACKENABLE(1U)
+
+#define S_DISABLE    0
+
+#define A_TP_TM_PIO_ADDR 0x418
+
+#define A_TP_TM_PIO_DATA 0x41c
+
+#define A_TP_TX_MOD_QUE_TABLE 0x420
+
+#define A_TP_TX_RESOURCE_LIMIT 0x424
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+
+#define S_TX_MOD_QUEUE_REQ_MAP    0
+#define M_TX_MOD_QUEUE_REQ_MAP    0xff
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+
+#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+
+#define A_TP_PIO_ADDR 0x440
+
+#define A_TP_PIO_DATA 0x444
+
+#define A_TP_RESET 0x44c
+
+#define S_FLSTINITENABLE    1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE    V_FLSTINITENABLE(1U)
+
+#define S_TPRESET    0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET    V_TPRESET(1U)
+
+#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+
+#define A_TP_CMM_MM_TX_FLST_BASE 0x464
+
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+
+#define A_TP_MIB_INDEX 0x450
+
+#define A_TP_MIB_RDATA 0x454
+
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+
+#define A_TP_INT_ENABLE 0x470
+
+#define A_TP_INT_CAUSE 0x474
+
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE    0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE    V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_TX_TRC_KEY0 0x20
+
+#define A_TP_RX_TRC_KEY0 0x120
+
+#define A_ULPRX_CTL 0x500
+
+#define S_ROUND_ROBIN    4
+#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
+#define F_ROUND_ROBIN    V_ROUND_ROBIN(1U)
+
+#define A_ULPRX_INT_ENABLE 0x504
+
+#define S_PARERR    0
+#define V_PARERR(x) ((x) << S_PARERR)
+#define F_PARERR    V_PARERR(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
+
+#define A_ULPRX_ISCSI_LLIMIT 0x50c
+
+#define A_ULPRX_ISCSI_ULIMIT 0x510
+
+#define A_ULPRX_ISCSI_TAGMASK 0x514
+
+#define A_ULPRX_TDDP_LLIMIT 0x51c
+
+#define A_ULPRX_TDDP_ULIMIT 0x520
+
+#define A_ULPRX_STAG_LLIMIT 0x52c
+
+#define A_ULPRX_STAG_ULIMIT 0x530
+
+#define A_ULPRX_RQ_LLIMIT 0x534
+
+#define A_ULPRX_RQ_ULIMIT 0x538
+
+#define A_ULPRX_PBL_LLIMIT 0x53c
+
+#define A_ULPRX_PBL_ULIMIT 0x540
+
+#define A_ULPRX_TDDP_TAGMASK 0x524
+
+#define A_ULPTX_CONFIG 0x580
+
+#define S_CFG_RR_ARB    0
+#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
+#define F_CFG_RR_ARB    V_CFG_RR_ARB(1U)
+
+#define A_ULPTX_INT_ENABLE 0x584
+
+#define S_PBL_BOUND_ERR_CH1    1
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1    V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0    0
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0    V_PBL_BOUND_ERR_CH0(1U)
+
+#define A_ULPTX_INT_CAUSE 0x588
+
+#define A_ULPTX_TPT_LLIMIT 0x58c
+
+#define A_ULPTX_TPT_ULIMIT 0x590
+
+#define A_ULPTX_PBL_LLIMIT 0x594
+
+#define A_ULPTX_PBL_ULIMIT 0x598
+
+#define A_ULPTX_DMA_WEIGHT 0x5ac
+
+#define S_D1_WEIGHT    16
+#define M_D1_WEIGHT    0xffff
+#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+
+#define S_D0_WEIGHT    0
+#define M_D0_WEIGHT    0xffff
+#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+
+#define A_PM1_RX_CFG 0x5c0
+
+#define A_PM1_RX_INT_ENABLE 0x5d8
+
+#define S_ZERO_E_CMD_ERROR    18
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR    V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR    17
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR    V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR    16
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR    V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR    15
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR    V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR    14
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR    V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR    13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR    V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR    12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR    V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR    11
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR    V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR    10
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR    V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR    9
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR    V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR    8
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR    V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR    7
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR    V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR    6
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR    V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR    3
+#define M_IESPI_PAR_ERROR    0x7
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+
+#define S_OCSPI_PAR_ERROR    0
+#define M_OCSPI_PAR_ERROR    0x7
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+
+#define A_PM1_RX_INT_CAUSE 0x5dc
+
+#define A_PM1_TX_CFG 0x5e0
+
+#define A_PM1_TX_INT_ENABLE 0x5f8
+
+#define S_ZERO_C_CMD_ERROR    18
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR    V_ZERO_C_CMD_ERROR(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR    17
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR    V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR    16
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR    V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR    15
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR    V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR    14
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR    V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR    13
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR    V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR    12
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR    V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR    11
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR    V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR    10
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR    V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR    9
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR    V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR    8
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR    V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR    7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR    V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR    6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR    V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR    3
+#define M_ICSPI_PAR_ERROR    0x7
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+
+#define S_OESPI_PAR_ERROR    0
+#define M_OESPI_PAR_ERROR    0x7
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+
+#define A_PM1_TX_INT_CAUSE 0x5fc
+
+#define A_MPS_CFG 0x600
+
+#define S_TPRXPORTEN    4
+#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
+#define F_TPRXPORTEN    V_TPRXPORTEN(1U)
+
+#define S_TPTXPORT1EN    3
+#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
+#define F_TPTXPORT1EN    V_TPTXPORT1EN(1U)
+
+#define S_TPTXPORT0EN    2
+#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
+#define F_TPTXPORT0EN    V_TPTXPORT0EN(1U)
+
+#define S_PORT1ACTIVE    1
+#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
+#define F_PORT1ACTIVE    V_PORT1ACTIVE(1U)
+
+#define S_PORT0ACTIVE    0
+#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
+#define F_PORT0ACTIVE    V_PORT0ACTIVE(1U)
+
+#define S_ENFORCEPKT    11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT    V_ENFORCEPKT(1U)
+
+#define A_MPS_INT_ENABLE 0x61c
+
+#define S_MCAPARERRENB    6
+#define M_MCAPARERRENB    0x7
+#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+
+#define S_RXTPPARERRENB    4
+#define M_RXTPPARERRENB    0x3
+#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+
+#define S_TX1TPPARERRENB    2
+#define M_TX1TPPARERRENB    0x3
+#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+
+#define S_TX0TPPARERRENB    0
+#define M_TX0TPPARERRENB    0x3
+#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+
+#define A_MPS_INT_CAUSE 0x620
+
+#define S_MCAPARERR    6
+#define M_MCAPARERR    0x7
+#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+
+#define S_RXTPPARERR    4
+#define M_RXTPPARERR    0x3
+#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+
+#define S_TX1TPPARERR    2
+#define M_TX1TPPARERR    0x3
+#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+
+#define S_TX0TPPARERR    0
+#define M_TX0TPPARERR    0x3
+#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+
+#define A_CPL_SWITCH_CNTRL 0x640
+
+#define A_CPL_INTR_ENABLE 0x650
+
+#define S_CIM_OVFL_ERROR    4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR    V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR    3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR    V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR    2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR    V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR    1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR    V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR    0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR    V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x654
+
+#define A_CPL_MAP_TBL_DATA 0x65c
+
+#define A_SMB_GLOBAL_TIME_CFG 0x660
+
+#define A_I2C_CFG 0x6a0
+
+#define S_I2C_CLKDIV    0
+#define M_I2C_CLKDIV    0xfff
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+
+#define A_MI1_CFG 0x6b0
+
+#define S_CLKDIV    5
+#define M_CLKDIV    0xff
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+
+#define S_ST    3
+#define M_ST    0x3
+#define V_ST(x) ((x) << S_ST)
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN    2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN    V_PREEN(1U)
+
+#define S_MDIINV    1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV    V_MDIINV(1U)
+
+#define S_MDIEN    0
+#define V_MDIEN(x) ((x) << S_MDIEN)
+#define F_MDIEN    V_MDIEN(1U)
+
+#define A_MI1_ADDR 0x6b4
+
+#define S_PHYADDR    5
+#define M_PHYADDR    0x1f
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+
+#define S_REGADDR    0
+#define M_REGADDR    0x1f
+#define V_REGADDR(x) ((x) << S_REGADDR)
+
+#define A_MI1_DATA 0x6b8
+
+#define A_MI1_OP 0x6bc
+
+#define S_MDI_OP    0
+#define M_MDI_OP    0x3
+#define V_MDI_OP(x) ((x) << S_MDI_OP)
+
+#define A_SF_DATA 0x6d8
+
+#define A_SF_OP 0x6dc
+
+#define S_BYTECNT    1
+#define M_BYTECNT    0x3
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+
+#define A_PL_INT_ENABLE0 0x6e0
+
+#define S_T3DBG    23
+#define V_T3DBG(x) ((x) << S_T3DBG)
+#define F_T3DBG    V_T3DBG(1U)
+
+#define S_XGMAC0_1    20
+#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
+#define F_XGMAC0_1    V_XGMAC0_1(1U)
+
+#define S_XGMAC0_0    19
+#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
+#define F_XGMAC0_0    V_XGMAC0_0(1U)
+
+#define S_MC5A    18
+#define V_MC5A(x) ((x) << S_MC5A)
+#define F_MC5A    V_MC5A(1U)
+
+#define S_CPL_SWITCH    12
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH    V_CPL_SWITCH(1U)
+
+#define S_MPS0    11
+#define V_MPS0(x) ((x) << S_MPS0)
+#define F_MPS0    V_MPS0(1U)
+
+#define S_PM1_TX    10
+#define V_PM1_TX(x) ((x) << S_PM1_TX)
+#define F_PM1_TX    V_PM1_TX(1U)
+
+#define S_PM1_RX    9
+#define V_PM1_RX(x) ((x) << S_PM1_RX)
+#define F_PM1_RX    V_PM1_RX(1U)
+
+#define S_ULP2_TX    8
+#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
+#define F_ULP2_TX    V_ULP2_TX(1U)
+
+#define S_ULP2_RX    7
+#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
+#define F_ULP2_RX    V_ULP2_RX(1U)
+
+#define S_TP1    6
+#define V_TP1(x) ((x) << S_TP1)
+#define F_TP1    V_TP1(1U)
+
+#define S_CIM    5
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM    V_CIM(1U)
+
+#define S_MC7_CM    4
+#define V_MC7_CM(x) ((x) << S_MC7_CM)
+#define F_MC7_CM    V_MC7_CM(1U)
+
+#define S_MC7_PMTX    3
+#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
+#define F_MC7_PMTX    V_MC7_PMTX(1U)
+
+#define S_MC7_PMRX    2
+#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
+#define F_MC7_PMRX    V_MC7_PMRX(1U)
+
+#define S_PCIM0    1
+#define V_PCIM0(x) ((x) << S_PCIM0)
+#define F_PCIM0    V_PCIM0(1U)
+
+#define S_SGE3    0
+#define V_SGE3(x) ((x) << S_SGE3)
+#define F_SGE3    V_SGE3(1U)
+
+#define A_PL_INT_CAUSE0 0x6e4
+
+#define A_PL_RST 0x6f0
+
+#define S_CRSTWRM    1
+#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
+#define F_CRSTWRM    V_CRSTWRM(1U)
+
+#define A_PL_REV 0x6f4
+
+#define A_PL_CLI 0x6f8
+
+#define A_MC5_DB_CONFIG 0x704
+
+#define S_TMTYPEHI    30
+#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
+#define F_TMTYPEHI    V_TMTYPEHI(1U)
+
+#define S_TMPARTSIZE    28
+#define M_TMPARTSIZE    0x3
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
+#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
+
+#define S_TMTYPE    26
+#define M_TMTYPE    0x3
+#define V_TMTYPE(x) ((x) << S_TMTYPE)
+#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+
+#define S_COMPEN    17
+#define V_COMPEN(x) ((x) << S_COMPEN)
+#define F_COMPEN    V_COMPEN(1U)
+
+#define S_PRTYEN    6
+#define V_PRTYEN(x) ((x) << S_PRTYEN)
+#define F_PRTYEN    V_PRTYEN(1U)
+
+#define S_MBUSEN    5
+#define V_MBUSEN(x) ((x) << S_MBUSEN)
+#define F_MBUSEN    V_MBUSEN(1U)
+
+#define S_DBGIEN    4
+#define V_DBGIEN(x) ((x) << S_DBGIEN)
+#define F_DBGIEN    V_DBGIEN(1U)
+
+#define S_TMRDY    2
+#define V_TMRDY(x) ((x) << S_TMRDY)
+#define F_TMRDY    V_TMRDY(1U)
+
+#define S_TMRST    1
+#define V_TMRST(x) ((x) << S_TMRST)
+#define F_TMRST    V_TMRST(1U)
+
+#define S_TMMODE    0
+#define V_TMMODE(x) ((x) << S_TMMODE)
+#define F_TMMODE    V_TMMODE(1U)
+
+#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+
+#define A_MC5_DB_FILTER_TABLE 0x710
+
+#define A_MC5_DB_SERVER_INDEX 0x714
+
+#define A_MC5_DB_RSP_LATENCY 0x720
+
+#define S_RDLAT    16
+#define M_RDLAT    0x1f
+#define V_RDLAT(x) ((x) << S_RDLAT)
+
+#define S_LRNLAT    8
+#define M_LRNLAT    0x1f
+#define V_LRNLAT(x) ((x) << S_LRNLAT)
+
+#define S_SRCHLAT    0
+#define M_SRCHLAT    0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+
+#define A_MC5_DB_PART_ID_INDEX 0x72c
+
+#define A_MC5_DB_INT_ENABLE 0x740
+
+#define S_DELACTEMPTY    18
+#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
+#define F_DELACTEMPTY    V_DELACTEMPTY(1U)
+
+#define S_DISPQPARERR    17
+#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
+#define F_DISPQPARERR    V_DISPQPARERR(1U)
+
+#define S_REQQPARERR    16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR    V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD    15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD    V_UNKNOWNCMD(1U)
+
+#define S_NFASRCHFAIL    8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL    V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL    7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL    V_ACTRGNFULL(1U)
+
+#define S_PARITYERR    6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR    V_PARITYERR(1U)
+
+#define A_MC5_DB_INT_CAUSE 0x744
+
+#define A_MC5_DB_DBGI_CONFIG 0x774
+
+#define A_MC5_DB_DBGI_REQ_CMD 0x778
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+
+#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
+
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+
+#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
+
+#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+
+#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+
+#define S_DBGIRSPVALID    0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID    V_DBGIRSPVALID(1U)
+
+#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
+
+#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
+
+#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+
+#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+
+#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+
+#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+
+#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+
+#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+
+#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+
+#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+
+#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+
+#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+
+#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+
+#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+
+#define A_MC5_DB_DATA_READ_CMD 0x7f8
+
+#define XGMAC0_0_BASE_ADDR 0x800
+
+#define A_XGM_TX_CTRL 0x800
+
+#define S_TXEN    0
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN    V_TXEN(1U)
+
+#define A_XGM_TX_CFG 0x804
+
+#define S_TXPAUSEEN    0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN    V_TXPAUSEEN(1U)
+
+#define A_XGM_RX_CTRL 0x80c
+
+#define S_RXEN    0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN    V_RXEN(1U)
+
+#define A_XGM_RX_CFG 0x810
+
+#define S_DISPAUSEFRAMES    9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES    V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES    8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES    V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO    7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO    V_ENJUMBO(1U)
+
+#define S_RMFCS    6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS    V_RMFCS(1U)
+
+#define S_ENHASHMCAST    2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST    V_ENHASHMCAST(1U)
+
+#define S_COPYALLFRAMES    0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES    V_COPYALLFRAMES(1U)
+
+#define A_XGM_RX_HASH_LOW 0x814
+
+#define A_XGM_RX_HASH_HIGH 0x818
+
+#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
+
+#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
+
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+
+#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
+
+#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
+
+#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+
+#define A_XGM_STAT_CTRL 0x880
+
+#define S_CLRSTATS    2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS    V_CLRSTATS(1U)
+
+#define A_XGM_RXFIFO_CFG 0x884
+
+#define S_RXFIFOPAUSEHWM    17
+#define M_RXFIFOPAUSEHWM    0xfff
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM    5
+#define M_RXFIFOPAUSELWM    0xfff
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_RXSTRFRWRD    1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD    V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES    0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES    V_DISERRFRAMES(1U)
+
+#define A_XGM_TXFIFO_CFG 0x888
+
+#define S_TXFIFOTHRESH    4
+#define M_TXFIFOTHRESH    0x1ff
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+
+#define A_XGM_SERDES_CTRL 0x890
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_SERDESRESET_    24
+#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
+#define F_SERDESRESET_    V_SERDESRESET_(1U)
+
+#define S_RXENABLE    4
+#define V_RXENABLE(x) ((x) << S_RXENABLE)
+#define F_RXENABLE    V_RXENABLE(1U)
+
+#define S_TXENABLE    3
+#define V_TXENABLE(x) ((x) << S_TXENABLE)
+#define F_TXENABLE    V_TXENABLE(1U)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define A_XGM_RGMII_IMP 0x89c
+
+#define S_XGM_IMPSETUPDATE    6
+#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
+#define F_XGM_IMPSETUPDATE    V_XGM_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD    3
+#define M_RGMIIIMPPD    0x7
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU    0
+#define M_RGMIIIMPPU    0x7
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+
+#define S_CALRESET    8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET    V_CALRESET(1U)
+
+#define S_CALUPDATE    7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE    V_CALUPDATE(1U)
+
+#define A_XGM_XAUI_IMP 0x8a0
+
+#define S_CALBUSY    31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY    V_CALBUSY(1U)
+
+#define S_XGM_CALFAULT    29
+#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
+#define F_XGM_CALFAULT    V_XGM_CALFAULT(1U)
+
+#define S_CALIMP    24
+#define M_CALIMP    0x1f
+#define V_CALIMP(x) ((x) << S_CALIMP)
+#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
+
+#define S_XAUIIMP    0
+#define M_XAUIIMP    0x7
+#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+
+#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_RESET_CTRL 0x8ac
+
+#define S_XG2G_RESET_    3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_    V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_    2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_    V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_    1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_    V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_    0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_    V_MAC_RESET_(1U)
+
+#define A_XGM_PORT_CFG 0x8b8
+
+#define S_CLKDIVRESET_    3
+#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
+#define F_CLKDIVRESET_    V_CLKDIVRESET_(1U)
+
+#define S_PORTSPEED    1
+#define M_PORTSPEED    0x3
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+
+#define S_ENRGMII    0
+#define V_ENRGMII(x) ((x) << S_ENRGMII)
+#define F_ENRGMII    V_ENRGMII(1U)
+
+#define A_XGM_INT_ENABLE 0x8d4
+
+#define S_TXFIFO_PRTY_ERR    17
+#define M_TXFIFO_PRTY_ERR    0x7
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+
+#define S_RXFIFO_PRTY_ERR    14
+#define M_RXFIFO_PRTY_ERR    0x7
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN    13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN    V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW    12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW    V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDES_LOS    4
+#define M_SERDES_LOS    0xf
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+
+#define S_XAUIPCSCTCERR    3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR    V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE    2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE    V_XAUIPCSALIGNCHANGE(1U)
+
+#define A_XGM_INT_CAUSE 0x8d8
+
+#define A_XGM_XAUI_ACT_CTRL 0x8dc
+
+#define S_TXACTENABLE    1
+#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
+#define F_TXACTENABLE    V_TXACTENABLE(1U)
+
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_RESET3    23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3    V_RESET3(1U)
+
+#define S_RESET2    22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2    V_RESET2(1U)
+
+#define S_RESET1    21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1    V_RESET1(1U)
+
+#define S_RESET0    20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0    V_RESET0(1U)
+
+#define S_PWRDN3    19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3    V_PWRDN3(1U)
+
+#define S_PWRDN2    18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2    V_PWRDN2(1U)
+
+#define S_PWRDN1    17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1    V_PWRDN1(1U)
+
+#define S_PWRDN0    16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0    V_PWRDN0(1U)
+
+#define S_RESETPLL23    15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23    V_RESETPLL23(1U)
+
+#define S_RESETPLL01    14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01    V_RESETPLL01(1U)
+
+#define A_XGM_SERDES_STAT0 0x8f0
+
+#define S_LOWSIG0    0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0    V_LOWSIG0(1U)
+
+#define A_XGM_SERDES_STAT3 0x8fc
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
+
+#define A_XGM_STAT_TX_BYTE_HIGH 0x904
+
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
+
+#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
+
+#define A_XGM_STAT_TX_BCAST 0x910
+
+#define A_XGM_STAT_TX_MCAST 0x914
+
+#define A_XGM_STAT_TX_PAUSE 0x918
+
+#define A_XGM_STAT_TX_64B_FRAMES 0x91c
+
+#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
+
+#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
+
+#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
+
+#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
+
+#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
+
+#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
+
+#define A_XGM_STAT_TX_ERR_FRAMES 0x938
+
+#define A_XGM_STAT_RX_BYTES_LOW 0x93c
+
+#define A_XGM_STAT_RX_BYTES_HIGH 0x940
+
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+
+#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
+
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+
+#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
+
+#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
+
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
+
+#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
+
+#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
+
+#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
+
+#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
+
+#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
+
+#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
+
+#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+
+#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+
+#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+
+#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+
+#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+
+#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+
+#define A_XGM_SERDES_STATUS0 0x98c
+
+#define A_XGM_SERDES_STATUS1 0x990
+
+#define S_CMULOCK    31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK    V_CMULOCK(1U)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+
+#define XGMAC0_1_BASE_ADDR 0xa00

+ 2681 - 0
drivers/net/cxgb3/sge.c

@@ -0,0 +1,2681 @@
+/*
+ * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/dma-mapping.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define USE_GTS 0
+
+#define SGE_RX_SM_BUF_SIZE 1536
+#define SGE_RX_COPY_THRES  256
+
+#define SGE_RX_DROP_THRES 16
+
+/*
+ * Period of the Tx buffer reclaim timer.  This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+/* WR size in bytes */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Types of Tx queues in each queue set.  Order here matters, do not change.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+/* Values for sge_txq.flags */
+enum {
+	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
+	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
+};
+
+struct tx_desc {
+	u64 flit[TX_DESC_FLITS];
+};
+
+struct rx_desc {
+	__be32 addr_lo;
+	__be32 len_gen;
+	__be32 gen2;
+	__be32 addr_hi;
+};
+
+struct tx_sw_desc {		/* SW state per Tx descriptor */
+	struct sk_buff *skb;
+};
+
+struct rx_sw_desc {		/* SW state per Rx descriptor */
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+};
+
+struct rsp_desc {		/* response queue descriptor */
+	struct rss_header rss_hdr;
+	__be32 flags;
+	__be32 len_cq;
+	u8 imm_data[47];
+	u8 intr_gen;
+};
+
+struct unmap_info {		/* packet unmapping info, overlays skb->cb */
+	int sflit;		/* start flit of first SGL entry in Tx descriptor */
+	u16 fragidx;		/* first page fragment in current Tx descriptor */
+	u16 addr_idx;		/* buffer index of first SGL entry in descriptor */
+	u32 len;		/* mapped length of skb main body */
+};
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
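+ *
+ * Worked example: assuming WR_FLITS == 16, as the 1-genbit table below
+ * implies, 16 flits still fit in a single descriptor (1 + 14/15 == 1)
+ * while 17 flits need two (1 + 15/15 == 2).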
+ */
+static u8 flit_desc_map[] = {
+	0,
+#if SGE_NUM_GENBITS == 1
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
+
+static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
+{
+	return container_of(q, struct sge_qset, fl[qidx]);
+}
+
+static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
+{
+	return container_of(q, struct sge_qset, rspq);
+}
+
+static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
+{
+	return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+/**
+ *	refill_rspq - replenish an SGE response queue
+ *	@adapter: the adapter
+ *	@q: the response queue to replenish
+ *	@credits: how many new responses to make available
+ *
+ *	Replenishes a response queue by making the supplied number of responses
+ *	available to HW.
+ */
+static inline void refill_rspq(struct adapter *adapter,
+			       const struct sge_rspq *q, unsigned int credits)
+{
+	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
+		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ *	need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ *	Returns true if the platform needs sk_buff unmapping.  Because the
+ *	result is a compile-time constant, the compiler can optimize away
+ *	the unmapping code entirely when it is not needed.
+ */
+static inline int need_skb_unmap(void)
+{
+	/*
+	 * This structure is used to tell if the platform needs buffer
+	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
+	 */
+	struct dummy {
+		DECLARE_PCI_UNMAP_ADDR(addr);
+	};
+
+	return sizeof(struct dummy) != 0;
+}
+
+/**
+ *	unmap_skb - unmap a packet main body and its page fragments
+ *	@skb: the packet
+ *	@q: the Tx queue containing Tx descriptors for the packet
+ *	@cidx: index of Tx descriptor
+ *	@pdev: the PCI device
+ *
+ *	Unmap the main body of an sk_buff and its page fragments, if any.
+ *	Because of the fairly complicated structure of our SGLs and the desire
+ *	to conserve space for metadata, we keep the information necessary to
+ *	unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
+ *	in the Tx descriptors (the physical addresses of the various data
+ *	buffers).  The send functions initialize the state in skb->cb so we
+ *	can unmap the buffers held in the first Tx descriptor here, and we
+ *	have enough information at this point to update the state for the next
+ *	Tx descriptor.
+ */
+static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
+			     unsigned int cidx, struct pci_dev *pdev)
+{
+	const struct sg_ent *sgp;
+	struct unmap_info *ui = (struct unmap_info *)skb->cb;
+	int nfrags, frag_idx, curflit, j = ui->addr_idx;
+
+	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
+
+	if (ui->len) {
+		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
+				 PCI_DMA_TODEVICE);
+		ui->len = 0;	/* so we know for the next descriptor of this skb */
+		j = 1;
+	}
+
+	frag_idx = ui->fragidx;
+	curflit = ui->sflit + 1 + j;
+	nfrags = skb_shinfo(skb)->nr_frags;
+
+	while (frag_idx < nfrags && curflit < WR_FLITS) {
+		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+			       skb_shinfo(skb)->frags[frag_idx].size,
+			       PCI_DMA_TODEVICE);
+		j ^= 1;
+		if (j == 0) {
+			sgp++;
+			curflit++;
+		}
+		curflit++;
+		frag_idx++;
+	}
+
+	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
+		ui->fragidx = frag_idx;
+		ui->addr_idx = j;
+		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
+	}
+}
+
+/**
+ *	free_tx_desc - reclaims Tx descriptors and their buffers
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *
+ *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ *	Tx buffers.  Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
+			 unsigned int n)
+{
+	struct tx_sw_desc *d;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	d = &q->sdesc[cidx];
+	while (n--) {
+		if (d->skb) {	/* an SGL is present */
+			if (need_skb_unmap())
+				unmap_skb(d->skb, q, cidx, pdev);
+			if (d->skb->priority == cidx)
+				kfree_skb(d->skb);
+		}
+		++d;
+		if (++cidx == q->size) {
+			cidx = 0;
+			d = q->sdesc;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed Tx descriptors
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim completed descriptors from
+ *
+ *	Reclaims Tx descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the Tx
+ *	queue's lock held.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+					struct sge_txq *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	if (reclaim) {
+		free_tx_desc(adapter, q, reclaim);
+		q->cleaned += reclaim;
+		q->in_use -= reclaim;
+	}
+}
+
+/**
+ *	should_restart_tx - are there enough resources to restart a Tx queue?
+ *	@q: the Tx queue
+ *
+ *	Checks if there are enough descriptors to restart a suspended Tx queue.
+ */
+static inline int should_restart_tx(const struct sge_txq *q)
+{
+	unsigned int r = q->processed - q->cleaned;
+
+	return q->in_use - r < (q->size >> 1);
+}
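+
+/*
+ * In other words, a suspended queue may be restarted once reclaiming what
+ * the HW has already processed would leave it less than half full.
+ */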
+
+/**
+ *	free_rx_bufs - free the Rx buffers on an SGE free list
+ *	@pdev: the PCI device associated with the adapter
+ *	@q: the SGE free list to clean up
+ *
+ *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
+ *	this queue should be stopped before calling this function.
+ */
+static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
+{
+	unsigned int cidx = q->cidx;
+
+	while (q->credits--) {
+		struct rx_sw_desc *d = &q->sdesc[cidx];
+
+		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+				 q->buf_size, PCI_DMA_FROMDEVICE);
+		kfree_skb(d->skb);
+		d->skb = NULL;
+		if (++cidx == q->size)
+			cidx = 0;
+	}
+}
+
+/**
+ *	add_one_rx_buf - add a packet buffer to a free-buffer list
+ *	@skb: the buffer to add
+ *	@len: the buffer length
+ *	@d: the HW Rx descriptor to write
+ *	@sd: the SW Rx descriptor to write
+ *	@gen: the generation bit value
+ *	@pdev: the PCI device associated with the adapter
+ *
+ *	Add a buffer of the given length to the supplied HW and SW Rx
+ *	descriptors.
+ */
+static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+				  struct rx_desc *d, struct rx_sw_desc *sd,
+				  unsigned int gen, struct pci_dev *pdev)
+{
+	dma_addr_t mapping;
+
+	sd->skb = skb;
+	mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+	pci_unmap_addr_set(sd, dma_addr, mapping);
+
+	d->addr_lo = cpu_to_be32(mapping);
+	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
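+	/* make sure the buffer address is visible before the gen bits */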
+	wmb();
+	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+}
+
+/**
+ *	refill_fl - refill an SGE free-buffer list
+ *	@adap: the adapter
+ *	@q: the free-list to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for allocating new buffers
+ *
+ *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must ensure that
+ *	@n does not exceed the queue's capacity.
+ */
+static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+{
+	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+	struct rx_desc *d = &q->desc[q->pidx];
+
+	while (n--) {
+		struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+		if (!skb)
+			break;
+
+		add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
+		d++;
+		sd++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			q->gen ^= 1;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+		q->credits++;
+	}
+
+	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
+}
+
+/**
+ *	recycle_rx_buf - recycle a receive buffer
+ *	@adap: the adapter
+ *	@q: the SGE free list
+ *	@idx: index of buffer to recycle
+ *
+ *	Recycles the specified buffer on the given free list by adding it at
+ *	the next available slot on the list.
+ */
+static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
+			   unsigned int idx)
+{
+	struct rx_desc *from = &q->desc[idx];
+	struct rx_desc *to = &q->desc[q->pidx];
+
+	q->sdesc[q->pidx] = q->sdesc[idx];
+	to->addr_lo = from->addr_lo;	/* already big endian */
+	to->addr_hi = from->addr_hi;	/* likewise */
+	wmb();
+	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
+	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
+	q->credits++;
+
+	if (++q->pidx == q->size) {
+		q->pidx = 0;
+		q->gen ^= 1;
+	}
+	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@pdev: the PCI device
+ *	@nelem: the number of descriptors
+ *	@elem_size: the size of each descriptor
+ *	@sw_size: the size of the SW state associated with each ring element
+ *	@phys: the physical address of the allocated ring
+ *	@metadata: address of the array holding the SW state for the ring
+ *
+ *	Allocates resources for an SGE descriptor ring, such as Tx queues,
+ *	free buffer lists, or response queues.  Each SGE ring requires
+ *	space for its HW descriptors plus, optionally, space for the SW state
+ *	associated with each HW entry (the metadata).  The function returns
+ *	three values: the virtual address for the HW ring (the return value
+ *	of the function), the physical address of the HW ring, and the address
+ *	of the SW ring.
+ */
+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+			size_t sw_size, dma_addr_t *phys, void *metadata)
+{
+	size_t len = nelem * elem_size;
+	void *s = NULL;
+	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+
+	if (!p)
+		return NULL;
+	if (sw_size) {
+		s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+		if (!s) {
+			dma_free_coherent(&pdev->dev, len, p, *phys);
+			return NULL;
+		}
+	}
+	if (metadata)
+		*(void **)metadata = s;
+	memset(p, 0, len);
+	return p;
+}
+
+/**
+ *	free_qset - free the resources of an SGE queue set
+ *	@adapter: the adapter owning the queue set
+ *	@q: the queue set
+ *
+ *	Release the HW and SW resources associated with an SGE queue set, such
+ *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
+ *	queue set must be quiesced prior to calling this.
+ */
+void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+{
+	int i;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (q->tx_reclaim_timer.function)
+		del_timer_sync(&q->tx_reclaim_timer);
+
+	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
+		if (q->fl[i].desc) {
+			spin_lock(&adapter->sge.reg_lock);
+			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
+			spin_unlock(&adapter->sge.reg_lock);
+			free_rx_bufs(pdev, &q->fl[i]);
+			kfree(q->fl[i].sdesc);
+			dma_free_coherent(&pdev->dev,
+					  q->fl[i].size *
+					  sizeof(struct rx_desc), q->fl[i].desc,
+					  q->fl[i].phys_addr);
+		}
+
+	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
+		if (q->txq[i].desc) {
+			spin_lock(&adapter->sge.reg_lock);
+			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
+			spin_unlock(&adapter->sge.reg_lock);
+			if (q->txq[i].sdesc) {
+				free_tx_desc(adapter, &q->txq[i],
+					     q->txq[i].in_use);
+				kfree(q->txq[i].sdesc);
+			}
+			dma_free_coherent(&pdev->dev,
+					  q->txq[i].size *
+					  sizeof(struct tx_desc),
+					  q->txq[i].desc, q->txq[i].phys_addr);
+			__skb_queue_purge(&q->txq[i].sendq);
+		}
+
+	if (q->rspq.desc) {
+		spin_lock(&adapter->sge.reg_lock);
+		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
+		spin_unlock(&adapter->sge.reg_lock);
+		dma_free_coherent(&pdev->dev,
+				  q->rspq.size * sizeof(struct rsp_desc),
+				  q->rspq.desc, q->rspq.phys_addr);
+	}
+
+	if (q->netdev)
+		q->netdev->atalk_ptr = NULL;
+
+	memset(q, 0, sizeof(*q));
+}
+
+/**
+ *	init_qset_cntxt - initialize an SGE queue set context info
+ *	@qs: the queue set
+ *	@id: the queue set id
+ *
+ *	Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
+{
+	qs->rspq.cntxt_id = id;
+	qs->fl[0].cntxt_id = 2 * id;
+	qs->fl[1].cntxt_id = 2 * id + 1;
+	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
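+
+/*
+ * Example: queue set id 2 gets response queue context 2, free-list
+ * contexts 4 and 5 (each set owns two consecutive free lists), and Tx
+ * contexts/TIDs at offset 2 from the respective FW_*_START bases.
+ */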
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits needed for a scatter/gather list that
+ *	can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
+	return (3 * n) / 2 + (n & 1);
+}
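+
+/*
+ * Each pair of SGL entries packs into 3 flits: one flit holds the two
+ * 32-bit lengths and two flits hold the 64-bit addresses, so for example
+ * sgl_len(1) == 2, sgl_len(2) == 3 and sgl_len(3) == 5.
+ */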
+
+/**
+ *	flits_to_desc - returns the num of Tx descriptors for the given flits
+ *	@n: the number of flits
+ *
+ *	Calculates the number of Tx descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
+	return flit_desc_map[n];
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->skb->data);
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+						    pci_unmap_addr(sd,
+								   dma_addr),
+						    len, PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+						       pci_unmap_addr(sd,
+								      dma_addr),
+						       len, PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres))
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
+/**
+ *	get_imm_packet - return the next ingress packet buffer from a response
+ *	@resp: the response descriptor containing the packet data
+ *
+ *	Return a packet containing the immediate data of the given response.
+ */
+static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
+{
+	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
+
+	if (skb) {
+		__skb_put(skb, IMMED_PKT_SIZE);
+		memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
+	}
+	return skb;
+}
+
+/**
+ *	calc_tx_descs - calculate the number of Tx descriptors for a packet
+ *	@skb: the packet
+ *
+ * 	Returns the number of Tx descriptors needed for the given Ethernet
+ * 	packet.  Ethernet packets require addition of WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+	unsigned int flits;
+
+	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+		return 1;
+
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+	if (skb_shinfo(skb)->gso_size)
+		flits++;
+	return flits_to_desc(flits);
+}
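+
+/*
+ * Worked example: a non-TSO packet with a linear body and two page
+ * fragments needs sgl_len(2 + 1) + 2 == 7 flits, which (with the
+ * descriptor sizes above) flits_to_desc() maps to a single descriptor.
+ */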
+
+/**
+ *	make_sgl - populate a scatter/gather list for a packet
+ *	@skb: the packet
+ *	@sgp: the SGL to populate
+ *	@start: start address of skb main body data to include in the SGL
+ *	@len: length of skb main body data to include in the SGL
+ *	@pdev: the PCI device
+ *
+ *	Generates a scatter/gather list for the buffers that make up a packet
+ *	and returns the SGL size in 8-byte words.  The caller must size the SGL
+ *	appropriately.
+ */
+static inline unsigned int make_sgl(const struct sk_buff *skb,
+				    struct sg_ent *sgp, unsigned char *start,
+				    unsigned int len, struct pci_dev *pdev)
+{
+	dma_addr_t mapping;
+	unsigned int i, j = 0, nfrags;
+
+	if (len) {
+		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
+		sgp->len[0] = cpu_to_be32(len);
+		sgp->addr[0] = cpu_to_be64(mapping);
+		j = 1;
+	}
+
+	nfrags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < nfrags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
+				       frag->size, PCI_DMA_TODEVICE);
+		sgp->len[j] = cpu_to_be32(frag->size);
+		sgp->addr[j] = cpu_to_be64(mapping);
+		j ^= 1;
+		if (j == 0)
+			++sgp;
+	}
+	if (j)
+		sgp->len[j] = 0;
+	return ((nfrags + (len != 0)) * 3) / 2 + j;
+}
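+
+/*
+ * Note that the value returned above equals sgl_len(nfrags + (len != 0)):
+ * j ends up as 1 exactly when the entry count is odd, mirroring the
+ * (n & 1) term in sgl_len().
+ */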
+
+/**
+ *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ *	@adap: the adapter
+ *	@q: the Tx queue
+ *
+ *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
+ *	where the HW may go to sleep just after we checked; in that case the
+ *	interrupt handler will detect the outstanding Tx packet and ring the
+ *	doorbell for us.
+ *
+ *	When GTS is disabled we unconditionally ring the doorbell.
+ */
+static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+#if USE_GTS
+	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+		set_bit(TXQ_LAST_PKT_DB, &q->flags);
+		t3_write_reg(adap, A_SG_KDOORBELL,
+			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	}
+#else
+	wmb();			/* write descriptors before telling HW */
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
+static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
+#endif
+}
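+
+/*
+ * With SGE_NUM_GENBITS == 2 a descriptor carries a second copy of the
+ * generation bit in its last flit; writing it last, after the callers'
+ * wmb(), lets the HW recognize a completely written descriptor.
+ */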
+
+/**
+ *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
+ *	@ndesc: number of Tx descriptors spanned by the SGL
+ *	@skb: the packet corresponding to the WR
+ *	@d: first Tx descriptor to be written
+ *	@pidx: index of above descriptors
+ *	@q: the SGE Tx queue
+ *	@sgl: the SGL
+ *	@flits: number of flits to the start of the SGL in the first descriptor
+ *	@sgl_flits: the SGL size in flits
+ *	@gen: the Tx descriptor generation
+ *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
+ *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
+ *
+ *	Write a work request header and an associated SGL.  If the SGL is
+ *	small enough to fit into one Tx descriptor it has already been written
+ *	and we just need to write the WR header.  Otherwise we distribute the
+ *	SGL across the number of descriptors it spans.
+ */
+static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
+			     struct tx_desc *d, unsigned int pidx,
+			     const struct sge_txq *q,
+			     const struct sg_ent *sgl,
+			     unsigned int flits, unsigned int sgl_flits,
+			     unsigned int gen, unsigned int wr_hi,
+			     unsigned int wr_lo)
+{
+	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
+	struct tx_sw_desc *sd = &q->sdesc[pidx];
+
+	sd->skb = skb;
+	if (need_skb_unmap()) {
+		struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+		ui->fragidx = 0;
+		ui->addr_idx = 0;
+		ui->sflit = flits;
+	}
+
+	if (likely(ndesc == 1)) {
+		skb->priority = pidx;
+		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+				   V_WR_SGLSFLT(flits)) | wr_hi;
+		wmb();
+		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+				   V_WR_GEN(gen)) | wr_lo;
+		wr_gen2(d, gen);
+	} else {
+		unsigned int ogen = gen;
+		const u64 *fp = (const u64 *)sgl;
+		struct work_request_hdr *wp = wrp;
+
+		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+				   V_WR_SGLSFLT(flits)) | wr_hi;
+
+		while (sgl_flits) {
+			unsigned int avail = WR_FLITS - flits;
+
+			if (avail > sgl_flits)
+				avail = sgl_flits;
+			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
+			sgl_flits -= avail;
+			ndesc--;
+			if (!sgl_flits)
+				break;
+
+			fp += avail;
+			d++;
+			sd++;
+			if (++pidx == q->size) {
+				pidx = 0;
+				gen ^= 1;
+				d = q->desc;
+				sd = q->sdesc;
+			}
+
+			sd->skb = skb;
+			wrp = (struct work_request_hdr *)d;
+			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+					   V_WR_SGLSFLT(1)) | wr_hi;
+			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+							sgl_flits + 1)) |
+					   V_WR_GEN(gen)) | wr_lo;
+			wr_gen2(d, gen);
+			flits = 1;
+		}
+		skb->priority = pidx;
+		wrp->wr_hi |= htonl(F_WR_EOP);
+		wmb();
+		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+		wr_gen2((struct tx_desc *)wp, ogen);
+		WARN_ON(ndesc != 0);
+	}
+}
+
+/**
+ *	write_tx_pkt_wr - write a TX_PKT work request
+ *	@adap: the adapter
+ *	@skb: the packet to send
+ *	@pi: the egress interface
+ *	@pidx: index of the first Tx descriptor to write
+ *	@gen: the generation value to use
+ *	@q: the Tx queue
+ *	@ndesc: number of descriptors the packet will occupy
+ *	@compl: the value of the COMPL bit to use
+ *
+ *	Generate a TX_PKT work request to send the supplied packet.
+ */
+static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+			    const struct port_info *pi,
+			    unsigned int pidx, unsigned int gen,
+			    struct sge_txq *q, unsigned int ndesc,
+			    unsigned int compl)
+{
+	unsigned int flits, sgl_flits, cntrl, tso_info;
+	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+	struct tx_desc *d = &q->desc[pidx];
+	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
+
+	cpl->len = htonl(skb->len | 0x80000000);
+	cntrl = V_TXPKT_INTF(pi->port_id);
+
+	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+
+	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+	if (tso_info) {
+		int eth_type;
+		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+
+		d->flit[2] = 0;
+		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+		hdr->cntrl = htonl(cntrl);
+		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+		    CPL_ETH_II : CPL_ETH_II_VLAN;
+		tso_info |= V_LSO_ETH_TYPE(eth_type) |
+		    V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
+		    V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+		hdr->lso_info = htonl(tso_info);
+		flits = 3;
+	} else {
+		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
+		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+		cpl->cntrl = htonl(cntrl);
+
+		if (skb->len <= WR_LEN - sizeof(*cpl)) {
+			q->sdesc[pidx].skb = NULL;
+			if (!skb->data_len)
+				memcpy(&d->flit[2], skb->data, skb->len);
+			else
+				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
+
+			flits = (skb->len + 7) / 8 + 2;
+			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
+					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
+					      | F_WR_SOP | F_WR_EOP | compl);
+			wmb();
+			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
+					      V_WR_TID(q->token));
+			wr_gen2(d, gen);
+			kfree_skb(skb);
+			return;
+		}
+
+		flits = 2;
+	}
+
+	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+	if (need_skb_unmap())
+		((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
+
+	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
+			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
+			 htonl(V_WR_TID(q->token)));
+}
+
+/**
+ *	t3_eth_xmit - add a packet to the Ethernet Tx queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
+ */
+int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned int ndesc, pidx, credits, gen, compl;
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = dev->priv;
+	struct sge_qset *qs = dev2qset(dev);
+	struct sge_txq *q = &qs->txq[TXQ_ETH];
+
+	/*
+	 * The chip's minimum packet length is 9 octets, but play it safe and
+	 * reject anything shorter than an Ethernet header.
+	 */
+	if (unlikely(skb->len < ETH_HLEN)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	spin_lock(&q->lock);
+	reclaim_completed_tx(adap, q);
+
+	credits = q->size - q->in_use;
+	ndesc = calc_tx_descs(skb);
+
+	if (unlikely(credits < ndesc)) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			set_bit(TXQ_ETH, &qs->txq_stopped);
+			q->stops++;
+			dev_err(&adap->pdev->dev,
+				"%s: Tx ring %u full while queue awake!\n",
+				dev->name, q->cntxt_id & 7);
+		}
+		spin_unlock(&q->lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	q->in_use += ndesc;
+	if (unlikely(credits - ndesc < q->stop_thres)) {
+		q->stops++;
+		netif_stop_queue(dev);
+		set_bit(TXQ_ETH, &qs->txq_stopped);
+#if !USE_GTS
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+			q->restarts++;
+			netif_wake_queue(dev);
+		}
+#endif
+	}
+
+	gen = q->gen;
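+	/*
+	 * Ask HW for a completion roughly every 8 descriptors: compl is
+	 * non-zero whenever the count of descriptors since the last
+	 * request crosses 8, and unacked is kept modulo 8.
+	 */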
+	q->unacked += ndesc;
+	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+	q->unacked &= 7;
+	pidx = q->pidx;
+	q->pidx += ndesc;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->gen ^= 1;
+	}
+
+	/* update port statistics */
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+	if (skb_shinfo(skb)->gso_size)
+		qs->port_stats[SGE_PSTAT_TSO]++;
+	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+		qs->port_stats[SGE_PSTAT_VLANINS]++;
+
+	dev->trans_start = jiffies;
+	spin_unlock(&q->lock);
+
+	/*
+	 * We do not use Tx completion interrupts to free DMAed Tx packets.
+	 * This is good for performance but means that we rely on new Tx
+	 * packets arriving to run the destructors of completed packets,
+	 * which open up space in their sockets' send queues.  Sometimes
+	 * we do not get such new packets, causing Tx to stall; a single
+	 * UDP transmitter is a good example of this situation.  We have
+	 * a clean-up timer that periodically reclaims completed packets,
+	 * but it doesn't run often enough (nor do we want it to) to prevent
+	 * lengthy stalls.  A solution to this problem is to run the
+	 * destructor early, after the packet is queued but before it's
+	 * DMAed.  A downside is that we lie to socket memory accounting,
+	 * but the amount of extra memory is reasonable (limited by the
+	 * number of Tx descriptors), the packets almost always do get freed
+	 * quickly by new packets, and for protocols like TCP that wait for
+	 * acks to really free up the data the extra memory is even less of
+	 * an issue.  On the positive side we run the destructors on the
+	 * sending CPU rather than on a potentially different completing
+	 * CPU, usually a good thing.  We also run them without holding our
+	 * Tx queue lock, unlike what reclaim_completed_tx() would otherwise
+	 * do.
+	 *
+	 * Run the destructor before telling the DMA engine about the packet
+	 * to make sure it doesn't complete and get freed prematurely.
+	 */
+	if (likely(!skb_shared(skb)))
+		skb_orphan(skb);
+
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+	check_ring_tx_db(adap, q);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	write_imm - write a packet into a Tx descriptor as immediate data
+ *	@d: the Tx descriptor to write
+ *	@skb: the packet
+ *	@len: the length of packet data to write as immediate data
+ *	@gen: the generation bit value to write
+ *
+ *	Writes a packet as immediate data into a Tx descriptor.  The packet
+ *	contains a work request at its beginning.  We must write the packet
+ *	carefully so the SGE doesn't accidentally read it before it has been
+ *	written in its entirety.
+ */
+static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
+			     unsigned int len, unsigned int gen)
+{
+	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
+	struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+	memcpy(&to[1], &from[1], len - sizeof(*from));
+	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+					V_WR_BCNTLFLT(len & 7));
+	wmb();
+	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+					V_WR_LEN((len + 7) / 8));
+	wr_gen2(d, gen);
+	kfree_skb(skb);
+}
+
+/**
+ *	check_desc_avail - check descriptor availability on a send queue
+ *	@adap: the adapter
+ *	@q: the send queue
+ *	@skb: the packet needing the descriptors
+ *	@ndesc: the number of Tx descriptors needed
+ *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ *	Checks if the requested number of Tx descriptors is available on an
+ *	SGE send queue.  If the queue is already suspended or not enough
+ *	descriptors are available the packet is queued for later transmission.
+ *	Must be called with the Tx queue locked.
+ *
+ *	Returns 0 if enough descriptors are available, 1 if there aren't
+ *	enough descriptors and the packet has been queued, and 2 if the caller
+ *	needs to retry because there weren't enough descriptors at the
+ *	beginning of the call but some freed up in the mean time.
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+				   struct sk_buff *skb, unsigned int ndesc,
+				   unsigned int qid)
+{
+	if (unlikely(!skb_queue_empty(&q->sendq))) {
+addq_exit:
+		__skb_queue_tail(&q->sendq, skb);
+		return 1;
+	}
+	if (unlikely(q->size - q->in_use < ndesc)) {
+		struct sge_qset *qs = txq_to_qset(q, qid);
+
+		set_bit(qid, &qs->txq_stopped);
+		smp_mb__after_clear_bit();
+
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(qid, &qs->txq_stopped))
+			return 2;
+
+		q->stops++;
+		goto addq_exit;
+	}
+	return 0;
+}
+
+/**
+ *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ *	@q: the SGE control Tx queue
+ *
+ *	This is a variant of reclaim_completed_tx() that is used for Tx queues
+ *	that send only immediate data (presently just the control queues) and
+ *	thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	q->in_use -= reclaim;
+	q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+	return skb->len <= WR_LEN && !skb->data_len;
+}
+
+/**
+ *	ctrl_xmit - send a packet through an SGE control Tx queue
+ *	@adap: the adapter
+ *	@q: the control queue
+ *	@skb: the packet
+ *
+ *	Send a packet through an SGE control Tx queue.  Packets sent through
+ *	a control queue must fit entirely as immediate data in a single Tx
+ *	descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+		     struct sk_buff *skb)
+{
+	int ret;
+	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+	if (unlikely(!immediate(skb))) {
+		WARN_ON(1);
+		dev_kfree_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+	wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+	spin_lock(&q->lock);
+again:
+	reclaim_completed_tx_imm(q);
+
+	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+	if (unlikely(ret)) {
+		if (ret == 1) {
+			spin_unlock(&q->lock);
+			return NET_XMIT_CN;
+		}
+		goto again;
+	}
+
+	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+	q->in_use++;
+	if (++q->pidx >= q->size) {
+		q->pidx = 0;
+		q->gen ^= 1;
+	}
+	spin_unlock(&q->lock);
+	wmb();
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ctrlq - restart a suspended control queue
+ *	@data: the queue set (as unsigned long) containing the control queue
+ *
+ *	Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct sge_txq *q = &qs->txq[TXQ_CTRL];
+	struct adapter *adap = qs->netdev->priv;
+
+	spin_lock(&q->lock);
+again:
+	reclaim_completed_tx_imm(q);
+
+	while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+		if (++q->pidx >= q->size) {
+			q->pidx = 0;
+			q->gen ^= 1;
+		}
+		q->in_use++;
+	}
+
+	if (!skb_queue_empty(&q->sendq)) {
+		set_bit(TXQ_CTRL, &qs->txq_stopped);
+		smp_mb__after_clear_bit();
+
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+			goto again;
+		q->stops++;
+	}
+
+	spin_unlock(&q->lock);
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/*
+ * Send a management message through control queue 0
+ */
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+	return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+}
+
+/**
+ *	write_ofld_wr - write an offload work request
+ *	@adap: the adapter
+ *	@skb: the packet to send
+ *	@q: the Tx queue
+ *	@pidx: index of the first Tx descriptor to write
+ *	@gen: the generation value to use
+ *	@ndesc: number of descriptors the packet will occupy
+ *
+ *	Write an offload work request to send the supplied packet.  The packet
+ *	data already carry the work request with most fields populated.
+ */
+static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+			  struct sge_txq *q, unsigned int pidx,
+			  unsigned int gen, unsigned int ndesc)
+{
+	unsigned int sgl_flits, flits;
+	struct work_request_hdr *from;
+	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+	struct tx_desc *d = &q->desc[pidx];
+
+	if (immediate(skb)) {
+		q->sdesc[pidx].skb = NULL;
+		write_imm(d, skb, skb->len, gen);
+		return;
+	}
+
+	/* Only TX_DATA builds SGLs */
+
+	from = (struct work_request_hdr *)skb->data;
+	memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
+
+	flits = (skb->h.raw - skb->data) / 8;
+	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
+			     adap->pdev);
+	if (need_skb_unmap())
+		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+
+	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+			 gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ *	@skb: the packet
+ *
+ * 	Returns the number of Tx descriptors needed for the given offload
+ * 	packet.  These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
+
+	if (skb->len <= WR_LEN && cnt == 0)
+		return 1;	/* packet fits as immediate data */
+
+	flits = (skb->h.raw - skb->data) / 8;	/* headers */
+	if (skb->tail != skb->h.raw)
+		cnt++;
+	return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ *	ofld_xmit - send a packet through an offload queue
+ *	@adap: the adapter
+ *	@q: the Tx offload queue
+ *	@skb: the packet
+ *
+ *	Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+		     struct sk_buff *skb)
+{
+	int ret;
+	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+	spin_lock(&q->lock);
+again:
+	reclaim_completed_tx(adap, q);
+
+	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+	if (unlikely(ret)) {
+		if (ret == 1) {
+			skb->priority = ndesc;	/* save for restart */
+			spin_unlock(&q->lock);
+			return NET_XMIT_CN;
+		}
+		goto again;
+	}
+
+	gen = q->gen;
+	q->in_use += ndesc;
+	pidx = q->pidx;
+	q->pidx += ndesc;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->gen ^= 1;
+	}
+	spin_unlock(&q->lock);
+
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+	check_ring_tx_db(adap, q);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_offloadq - restart a suspended offload queue
+ *	@data: the queue set (as unsigned long) containing the offload queue
+ *
+ *	Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct sge_txq *q = &qs->txq[TXQ_OFLD];
+	struct adapter *adap = qs->netdev->priv;
+
+	spin_lock(&q->lock);
+again:
+	reclaim_completed_tx(adap, q);
+
+	while ((skb = skb_peek(&q->sendq)) != NULL) {
+		unsigned int gen, pidx;
+		unsigned int ndesc = skb->priority;
+
+		if (unlikely(q->size - q->in_use < ndesc)) {
+			set_bit(TXQ_OFLD, &qs->txq_stopped);
+			smp_mb__after_clear_bit();
+
+			if (should_restart_tx(q) &&
+			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+				goto again;
+			q->stops++;
+			break;
+		}
+
+		gen = q->gen;
+		q->in_use += ndesc;
+		pidx = q->pidx;
+		q->pidx += ndesc;
+		if (q->pidx >= q->size) {
+			q->pidx -= q->size;
+			q->gen ^= 1;
+		}
+		__skb_unlink(skb, &q->sendq);
+		spin_unlock(&q->lock);
+
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+		spin_lock(&q->lock);
+	}
+	spin_unlock(&q->lock);
+
+#if USE_GTS
+	set_bit(TXQ_RUNNING, &q->flags);
+	set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ *	queue_set - return the queue set a packet should use
+ *	@skb: the packet
+ *
+ *	Maps a packet to the SGE queue set it should use.  The desired queue
+ *	set is carried in bits 1-3 in the packet's priority.
+ */
+static inline int queue_set(const struct sk_buff *skb)
+{
+	return skb->priority >> 1;
+}
+
+/**
+ *	is_ctrl_pkt - return whether an offload packet is a control packet
+ *	@skb: the packet
+ *
+ *	Determines whether an offload packet should use an OFLD or a CTRL
+ *	Tx queue.  This is indicated by bit 0 in the packet's priority.
+ */
+static inline int is_ctrl_pkt(const struct sk_buff *skb)
+{
+	return skb->priority & 1;
+}
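+
+/*
+ * Example of the priority encoding used by the two helpers above:
+ * skb->priority == 5 (binary 101) marks a control packet (bit 0 set)
+ * destined for queue set 2 (bits 1-3).
+ */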
+
+/**
+ *	t3_offload_tx - send an offload packet
+ *	@tdev: the offload device to send to
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  We use the packet priority to select the
+ *	appropriate Tx queue as follows: bit 0 indicates whether the packet
+ *	should be sent as regular or control, bits 1-3 select the queue set.
+ */
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+	struct adapter *adap = tdev2adap(tdev);
+	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
+
+	if (unlikely(is_ctrl_pkt(skb)))
+		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
+
+	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
+}
+
+/**
+ *	offload_enqueue - add an offload packet to an SGE offload receive queue
+ *	@q: the SGE response queue
+ *	@skb: the packet
+ *
+ *	Add a new offload packet to an SGE response queue's offload packet
+ *	queue.  If the packet is the first on the queue it schedules the RX
+ *	softirq to process the queue.
+ */
+static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+{
+	skb->next = skb->prev = NULL;
+	if (q->rx_tail)
+		q->rx_tail->next = skb;
+	else {
+		struct sge_qset *qs = rspq_to_qset(q);
+
+		if (__netif_rx_schedule_prep(qs->netdev))
+			__netif_rx_schedule(qs->netdev);
+		q->rx_head = skb;
+	}
+	q->rx_tail = skb;
+}
+
+/**
+ *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
+ *	@tdev: the offload device that will be receiving the packets
+ *	@q: the SGE response queue that assembled the bundle
+ *	@skbs: the partial bundle
+ *	@n: the number of packets in the bundle
+ *
+ *	Delivers a (partial) bundle of Rx offload packets to an offload device.
+ */
+static inline void deliver_partial_bundle(struct t3cdev *tdev,
+					  struct sge_rspq *q,
+					  struct sk_buff *skbs[], int n)
+{
+	if (n) {
+		q->offload_bundles++;
+		tdev->recv(tdev, skbs, n);
+	}
+}
+
+/**
+ *	ofld_poll - NAPI handler for offload packets in interrupt mode
+ *	@dev: the network device doing the polling
+ *	@budget: polling budget
+ *
+ *	The NAPI handler for offload packets when a response queue is serviced
+ *	by the hard interrupt handler, i.e., when it's operating in non-polling
+ *	mode.  Creates small packet batches and sends them through the offload
+ *	receive handler.  Batches need to be of modest size as we do prefetches
+ *	on the packets in each.
+ */
+static int ofld_poll(struct net_device *dev, int *budget)
+{
+	struct adapter *adapter = dev->priv;
+	struct sge_qset *qs = dev2qset(dev);
+	struct sge_rspq *q = &qs->rspq;
+	int work_done, limit = min(*budget, dev->quota), avail = limit;
+
+	while (avail) {
+		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+		int ngathered;
+
+		spin_lock_irq(&q->lock);
+		head = q->rx_head;
+		if (!head) {
+			work_done = limit - avail;
+			*budget -= work_done;
+			dev->quota -= work_done;
+			__netif_rx_complete(dev);
+			spin_unlock_irq(&q->lock);
+			return 0;
+		}
+
+		tail = q->rx_tail;
+		q->rx_head = q->rx_tail = NULL;
+		spin_unlock_irq(&q->lock);
+
+		for (ngathered = 0; avail && head; avail--) {
+			prefetch(head->data);
+			skbs[ngathered] = head;
+			head = head->next;
+			skbs[ngathered]->next = NULL;
+			if (++ngathered == RX_BUNDLE_SIZE) {
+				q->offload_bundles++;
+				adapter->tdev.recv(&adapter->tdev, skbs,
+						   ngathered);
+				ngathered = 0;
+			}
+		}
+		if (head) {	/* splice remaining packets back onto Rx queue */
+			spin_lock_irq(&q->lock);
+			tail->next = q->rx_head;
+			if (!q->rx_head)
+				q->rx_tail = tail;
+			q->rx_head = head;
+			spin_unlock_irq(&q->lock);
+		}
+		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+	}
+	work_done = limit - avail;
+	*budget -= work_done;
+	dev->quota -= work_done;
+	return 1;
+}
+
+/**
+ *	rx_offload - process a received offload packet
+ *	@tdev: the offload device receiving the packet
+ *	@rq: the response queue that received the packet
+ *	@skb: the packet
+ *	@rx_gather: a gather list of packets if we are building a bundle
+ *	@gather_idx: index of the next available slot in the bundle
+ *
+ *	Process an ingress offload packet and add it to the offload ingress
+ *	queue.  Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+			     struct sk_buff *skb, struct sk_buff *rx_gather[],
+			     unsigned int gather_idx)
+{
+	rq->offload_pkts++;
+	skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
+
+	if (rq->polling) {
+		rx_gather[gather_idx++] = skb;
+		if (gather_idx == RX_BUNDLE_SIZE) {
+			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+			gather_idx = 0;
+			rq->offload_bundles++;
+		}
+	} else
+		offload_enqueue(rq, skb);
+
+	return gather_idx;
+}
+
+/**
+ *	restart_tx - check whether to restart suspended Tx queues
+ *	@qs: the queue set to resume
+ *
+ *	Restarts suspended Tx queues of an SGE queue set if they have enough
+ *	free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
+	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+		qs->txq[TXQ_ETH].restarts++;
+		if (netif_running(qs->netdev))
+			netif_wake_queue(qs->netdev);
+	}
+
+	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+		qs->txq[TXQ_OFLD].restarts++;
+		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+	}
+	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+		qs->txq[TXQ_CTRL].restarts++;
+		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+	}
+}
+
+/**
+ *	rx_eth - process an ingress ethernet packet
+ *	@adap: the adapter
+ *	@rq: the response queue that received the packet
+ *	@skb: the packet
+ *	@pad: amount of padding at the start of the buffer
+ *
+ *	Process an ingress Ethernet packet and deliver it to the stack.
+ *	The padding is 2 if the packet was delivered in an Rx buffer and 0
+ *	if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+		   struct sk_buff *skb, int pad)
+{
+	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+	struct port_info *pi;
+
+	rq->eth_pkts++;
+	skb_pull(skb, sizeof(*p) + pad);
+	skb->dev = adap->port[p->iff];
+	skb->dev->last_rx = jiffies;
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	pi = netdev_priv(skb->dev);
+	if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
+	    !p->fragment) {
+		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	if (unlikely(p->vlan_valid)) {
+		struct vlan_group *grp = pi->vlan_grp;
+
+		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
+		if (likely(grp))
+			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
+					  rq->polling);
+		else
+			dev_kfree_skb_any(skb);
+	} else if (rq->polling)
+		netif_receive_skb(skb);
+	else
+		netif_rx(skb);
+}
+
+/**
+ *	handle_rsp_cntrl_info - handles control information in a response
+ *	@qs: the queue set corresponding to the response
+ *	@flags: the response control flags
+ *
+ *	Handles the control information of an SGE response, such as GTS
+ *	indications and completion credits for the queue set's Tx queues.
+ *	HW coalesces credits; we don't do any extra SW coalescing.
+ */
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
+{
+	unsigned int credits;
+
+#if USE_GTS
+	if (flags & F_RSPD_TXQ0_GTS)
+		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
+	credits = G_RSPD_TXQ0_CR(flags);
+	if (credits)
+		qs->txq[TXQ_ETH].processed += credits;
+
+	credits = G_RSPD_TXQ2_CR(flags);
+	if (credits)
+		qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+	if (flags & F_RSPD_TXQ1_GTS)
+		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+	credits = G_RSPD_TXQ1_CR(flags);
+	if (credits)
+		qs->txq[TXQ_OFLD].processed += credits;
+}
+
+/**
+ *	check_ring_db - check if we need to ring any doorbells
+ *	@adap: the adapter
+ *	@qs: the queue set whose Tx queues are to be examined
+ *	@sleeping: indicates which Tx queue sent GTS
+ *
+ *	Checks if some of a queue set's Tx queues need to ring their doorbells
+ *	to resume transmission after idling while they still have unprocessed
+ *	descriptors.
+ */
+static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+			  unsigned int sleeping)
+{
+	if (sleeping & F_RSPD_TXQ0_GTS) {
+		struct sge_txq *txq = &qs->txq[TXQ_ETH];
+
+		if (txq->cleaned + txq->in_use != txq->processed &&
+		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+			set_bit(TXQ_RUNNING, &txq->flags);
+			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+				     V_EGRCNTX(txq->cntxt_id));
+		}
+	}
+
+	if (sleeping & F_RSPD_TXQ1_GTS) {
+		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
+
+		if (txq->cleaned + txq->in_use != txq->processed &&
+		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+			set_bit(TXQ_RUNNING, &txq->flags);
+			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+				     V_EGRCNTX(txq->cntxt_id));
+		}
+	}
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@r: the response descriptor
+ *	@q: the response queue
+ *
+ *	Returns true if a response descriptor contains an as-yet unprocessed
+ *	response.
+ */
+static inline int is_new_response(const struct rsp_desc *r,
+				  const struct sge_rspq *q)
+{
+	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
+
+#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
+#define NOMEM_INTR_DELAY 2500
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@adap: the adapter
+ *	@qs: the queue set to which the response queue belongs
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from an SGE response queue up to the supplied budget.
+ *	Responses include received packets as well as credits and other events
+ *	for the queues that belong to the response queue's queue set.
+ *	A negative budget is effectively unlimited.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct adapter *adap, struct sge_qset *qs,
+			     int budget)
+{
+	struct sge_rspq *q = &qs->rspq;
+	struct rsp_desc *r = &q->desc[q->cidx];
+	int budget_left = budget;
+	unsigned int sleeping = 0;
+	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
+	int ngathered = 0;
+
+	q->next_holdoff = q->holdoff_tmr;
+
+	while (likely(budget_left && is_new_response(r, q))) {
+		int eth, ethpad = 0;
+		struct sk_buff *skb = NULL;
+		u32 len, flags = ntohl(r->flags);
+		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+
+		eth = r->rss_hdr.opcode == CPL_RX_PKT;
+
+		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
+			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
+			if (!skb)
+				goto no_mem;
+
+			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+			skb->data[0] = CPL_ASYNC_NOTIF;
+			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
+			q->async_notif++;
+		} else if (flags & F_RSPD_IMM_DATA_VALID) {
+			skb = get_imm_packet(r);
+			if (unlikely(!skb)) {
+no_mem:
+				q->next_holdoff = NOMEM_INTR_DELAY;
+				q->nomem++;
+				/* consume one credit since we tried */
+				budget_left--;
+				break;
+			}
+			q->imm_data++;
+		} else if ((len = ntohl(r->len_cq)) != 0) {
+			struct sge_fl *fl;
+
+			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			fl->credits--;
+			skb = get_packet(adap, fl, G_RSPD_LEN(len),
+					 eth ? SGE_RX_DROP_THRES : 0);
+			if (!skb)
+				q->rx_drops++;
+			else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
+				__skb_pull(skb, 2);
+			ethpad = 2;
+			if (++fl->cidx == fl->size)
+				fl->cidx = 0;
+		} else
+			q->pure_rsps++;
+
+		if (flags & RSPD_CTRL_MASK) {
+			sleeping |= flags & RSPD_GTS_MASK;
+			handle_rsp_cntrl_info(qs, flags);
+		}
+
+		r++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->gen ^= 1;
+			r = q->desc;
+		}
+		prefetch(r);
+
+		if (++q->credits >= (q->size / 4)) {
+			refill_rspq(adap, q, q->credits);
+			q->credits = 0;
+		}
+
+		if (likely(skb != NULL)) {
+			if (eth)
+				rx_eth(adap, q, skb, ethpad);
+			else {
+				/* Preserve the RSS info in csum & priority */
+				skb->csum = rss_hi;
+				skb->priority = rss_lo;
+				ngathered = rx_offload(&adap->tdev, q, skb,
+						       offload_skbs, ngathered);
+			}
+		}
+
+		--budget_left;
+	}
+
+	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+	if (sleeping)
+		check_ring_db(adap, qs, sleeping);
+
+	smp_mb();		/* commit Tx queue .processed updates */
+	if (unlikely(qs->txq_stopped != 0))
+		restart_tx(qs);
+
+	budget -= budget_left;
+	return budget;
+}
+
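+/*
+ * A response is "pure" if it carries no data: no async notification, no
+ * immediate data, and a zero len_cq, i.e. no free-list buffer consumed.
+ */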
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+	u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+	return (n | r->len_cq) == 0;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for Rx processing
+ *	@dev: the net device
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct net_device *dev, int *budget)
+{
+	struct adapter *adap = dev->priv;
+	struct sge_qset *qs = dev2qset(dev);
+	int effective_budget = min(*budget, dev->quota);
+
+	int work_done = process_responses(adap, qs, effective_budget);
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done >= effective_budget)
+		return 1;
+
+	netif_rx_complete(dev);
+
+	/*
+	 * Because we don't atomically flush the following write it is
+	 * possible that in very rare cases it can reach the device in a way
+	 * that races with a new response being written plus an error interrupt
+	 * causing the NAPI interrupt handler below to return unhandled status
+	 * to the OS.  To protect against this would require flushing the write
+	 * and doing both the write and the flush with interrupts off.  Way too
+	 * expensive and unjustifiable given the rarity of the race.
+	 *
+	 * The race cannot happen at all with MSI-X.
+	 */
+	t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+		     V_NEWTIMER(qs->rspq.next_holdoff) |
+		     V_NEWINDEX(qs->rspq.cidx));
+	return 0;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct net_device *dev)
+{
+	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+/**
+ *	process_pure_responses - process pure responses from a response queue
+ *	@adap: the adapter
+ *	@qs: the queue set owning the response queue
+ *	@r: the first pure response to process
+ *
+ *	A simpler version of process_responses() that handles only pure (i.e.,
+ *	non data-carrying) responses.  Such responses are too lightweight to
+ *	justify calling a softirq under NAPI, so we handle them specially in
+ *	the interrupt handler.  The function is called with a pointer to a
+ *	response, which the caller must ensure is a valid pure response.
+ *
+ *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+				  struct rsp_desc *r)
+{
+	struct sge_rspq *q = &qs->rspq;
+	unsigned int sleeping = 0;
+
+	do {
+		u32 flags = ntohl(r->flags);
+
+		r++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->gen ^= 1;
+			r = q->desc;
+		}
+		prefetch(r);
+
+		if (flags & RSPD_CTRL_MASK) {
+			sleeping |= flags & RSPD_GTS_MASK;
+			handle_rsp_cntrl_info(qs, flags);
+		}
+
+		q->pure_rsps++;
+		if (++q->credits >= (q->size / 4)) {
+			refill_rspq(adap, q, q->credits);
+			q->credits = 0;
+		}
+	} while (is_new_response(r, q) && is_pure_response(r));
+
+	if (sleeping)
+		check_ring_db(adap, qs, sleeping);
+
+	smp_mb();		/* commit Tx queue .processed updates */
+	if (unlikely(qs->txq_stopped != 0))
+		restart_tx(qs);
+
+	return is_new_response(r, q);
+}
+
+/**
+ *	handle_responses - decide what to do with new responses in NAPI mode
+ *	@adap: the adapter
+ *	@q: the response queue
+ *
+ *	This is used by the NAPI interrupt handlers to decide what to do with
+ *	new SGE responses.  If there are no new responses it returns -1.  If
+ *	there are new responses and they are pure (i.e., non-data carrying)
+ *	it handles them straight in hard interrupt context as they are very
+ *	cheap and don't deliver any packets.  Finally, if there are any data
+ *	signaling responses it schedules the NAPI handler.  Returns 1 if it
+ *	schedules NAPI, 0 if all new responses were pure.
+ *
+ *	The caller must ascertain NAPI is not already running.
+ */
+static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+{
+	struct sge_qset *qs = rspq_to_qset(q);
+	struct rsp_desc *r = &q->desc[q->cidx];
+
+	if (!is_new_response(r, q))
+		return -1;
+	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+		return 0;
+	}
+	if (likely(__netif_rx_schedule_prep(qs->netdev)))
+		__netif_rx_schedule(qs->netdev);
+	return 1;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
+ * (i.e., response queue serviced in hard interrupt).
+ */
+irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_qset *qs = cookie;
+	struct adapter *adap = qs->netdev->priv;
+	struct sge_rspq *q = &qs->rspq;
+
+	spin_lock(&q->lock);
+	if (process_responses(adap, qs, -1) == 0)
+		q->unhandled_irqs++;
+	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+{
+	struct sge_qset *qs = cookie;
+	struct adapter *adap = qs->netdev->priv;
+	struct sge_rspq *q = &qs->rspq;
+
+	spin_lock(&q->lock);
+	BUG_ON(napi_is_scheduled(qs->netdev));
+
+	if (handle_responses(adap, q) < 0)
+		q->unhandled_irqs++;
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * The non-NAPI MSI interrupt handler.  This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same MSI vector.  We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi(int irq, void *cookie)
+{
+	int new_packets = 0;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+	spin_lock(&q->lock);
+
+	if (process_responses(adap, &adap->sge.qs[0], -1)) {
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+		new_packets = 1;
+	}
+
+	if (adap->params.nports == 2 &&
+	    process_responses(adap, &adap->sge.qs[1], -1)) {
+		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+			     V_NEWTIMER(q1->next_holdoff) |
+			     V_NEWINDEX(q1->cidx));
+		new_packets = 1;
+	}
+
+	if (!new_packets && t3_slow_intr_handler(adap) == 0)
+		q->unhandled_irqs++;
+
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
+{
+	if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
+		if (likely(__netif_rx_schedule_prep(dev)))
+			__netif_rx_schedule(dev);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
+ * by NAPI polling).  Handles data events from SGE response queues as well as
+ * error and other async events as they all use the same MSI vector.  We use
+ * one SGE response queue per port in this mode and protect all response
+ * queues with queue 0's lock.
+ */
+irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+{
+	int new_packets;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+	spin_lock(&q->lock);
+
+	new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
+	if (adap->params.nports == 2)
+		new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
+					       &adap->sge.qs[1].rspq);
+	if (!new_packets && t3_slow_intr_handler(adap) == 0)
+		q->unhandled_irqs++;
+
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static inline int process_responses_gts(struct adapter *adap,
+					struct sge_rspq *rq)
+{
+	int work;
+
+	work = process_responses(adap, rspq_to_qset(rq), -1);
+	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+	return work;
+}
+
+/*
+ * The legacy INTx interrupt handler.  This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same interrupt pin.  We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr(int irq, void *cookie)
+{
+	int work_done, w0, w1;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+	spin_lock(&q0->lock);
+
+	w0 = is_new_response(&q0->desc[q0->cidx], q0);
+	w1 = adap->params.nports == 2 &&
+	    is_new_response(&q1->desc[q1->cidx], q1);
+
+	if (likely(w0 | w1)) {
+		t3_write_reg(adap, A_PL_CLI, 0);
+		t3_read_reg(adap, A_PL_CLI);	/* flush */
+
+		if (likely(w0))
+			process_responses_gts(adap, q0);
+
+		if (w1)
+			process_responses_gts(adap, q1);
+
+		work_done = w0 | w1;
+	} else
+		work_done = t3_slow_intr_handler(adap);
+
+	spin_unlock(&q0->lock);
+	return IRQ_RETVAL(work_done != 0);
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin.  We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr(int irq, void *cookie)
+{
+	u32 map;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+	t3_write_reg(adap, A_PL_CLI, 0);
+	map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+	if (unlikely(!map))	/* shared interrupt, most likely */
+		return IRQ_NONE;
+
+	spin_lock(&q0->lock);
+
+	if (unlikely(map & F_ERRINTR))
+		t3_slow_intr_handler(adap);
+
+	if (likely(map & 1))
+		process_responses_gts(adap, q0);
+
+	if (map & 2)
+		process_responses_gts(adap, &adap->sge.qs[1].rspq);
+
+	spin_unlock(&q0->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin.  We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+{
+	u32 map;
+	struct net_device *dev;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+	t3_write_reg(adap, A_PL_CLI, 0);
+	map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+	if (unlikely(!map))	/* shared interrupt, most likely */
+		return IRQ_NONE;
+
+	spin_lock(&q0->lock);
+
+	if (unlikely(map & F_ERRINTR))
+		t3_slow_intr_handler(adap);
+
+	if (likely(map & 1)) {
+		dev = adap->sge.qs[0].netdev;
+
+		if (likely(__netif_rx_schedule_prep(dev)))
+			__netif_rx_schedule(dev);
+	}
+	if (map & 2) {
+		dev = adap->sge.qs[1].netdev;
+
+		if (likely(__netif_rx_schedule_prep(dev)))
+			__netif_rx_schedule(dev);
+	}
+
+	spin_unlock(&q0->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	t3_intr_handler - select the top-level interrupt handler
+ *	@adap: the adapter
+ *	@polling: whether using NAPI to service response queues
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
+ *	response queues.
+ */
+intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
+{
+	if (adap->flags & USING_MSIX)
+		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
+	if (adap->flags & USING_MSI)
+		return polling ? t3_intr_msi_napi : t3_intr_msi;
+	if (adap->params.rev > 0)
+		return polling ? t3b_intr_napi : t3b_intr;
+	return t3_intr;
+}
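
The handler returned here is passed directly to request_irq() by the
interrupt-setup code.  A minimal sketch, assuming the caller tracks NAPI use
in an rspq_polling flag (flag name hypothetical, not part of this patch):

	err = request_irq(adap->pdev->irq,
			  t3_intr_handler(adap, rspq_polling),
			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
			  adap->name, adap);	/* released via free_irq() */
	if (err)
		goto irq_err;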
+
+/**
+ *	t3_sge_err_intr_handler - SGE async event interrupt handler
+ *	@adapter: the adapter
+ *
+ *	Interrupt handler for SGE asynchronous (non-data) events.
+ */
+void t3_sge_err_intr_handler(struct adapter *adapter)
+{
+	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+
+	if (status & F_RSPQCREDITOVERFOW)
+		CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+	if (status & F_RSPQDISABLED) {
+		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+		CH_ALERT(adapter,
+			 "packet delivered to disabled response queue "
+			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+	}
+
+	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+	if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
+		t3_fatal_err(adapter);
+}
+
+/**
+ *	sge_timer_cb - perform periodic maintenance of an SGE qset
+ *	@data: the SGE queue set to maintain
+ *
+ *	Runs periodically from a timer to perform maintenance of an SGE queue
+ *	set.  It performs two tasks:
+ *
+ *	a) Cleans up any completed Tx descriptors that may still be pending.
+ *	Normal descriptor cleanup happens when new packets are added to a Tx
+ *	queue, so this timer is relatively infrequent and does any cleanup only
+ *	if the Tx queue has not seen any new packets in a while.  We make a
+ *	best-effort attempt to reclaim descriptors, in that we don't wait
+ *	around if we cannot get a queue's lock (which most likely is because
+ *	someone else is queueing new packets and so will also handle the
+ *	cleanup).  Since control queues use immediate data exclusively we don't
+ *	bother cleaning them up here.
+ *
+ *	b) Replenishes Rx queues that have run out due to memory shortage.
+ *	Normally new Rx buffers are added when existing ones are consumed, but
+ *	when out of memory a queue can become empty.  We add only a few
+ *	buffers here; the queue will be replenished fully as these new buffers
+ *	are used up, provided the memory shortage has subsided.
+ */
+static void sge_timer_cb(unsigned long data)
+{
+	spinlock_t *lock;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct adapter *adap = qs->netdev->priv;
+
+	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
+		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+		spin_unlock(&qs->txq[TXQ_ETH].lock);
+	}
+	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
+		spin_unlock(&qs->txq[TXQ_OFLD].lock);
+	}
+	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
+	    &adap->sge.qs[0].rspq.lock;
+	if (spin_trylock_irq(lock)) {
+		if (!napi_is_scheduled(qs->netdev)) {
+			if (qs->fl[0].credits < qs->fl[0].size)
+				__refill_fl(adap, &qs->fl[0]);
+			if (qs->fl[1].credits < qs->fl[1].size)
+				__refill_fl(adap, &qs->fl[1]);
+		}
+		spin_unlock_irq(lock);
+	}
+	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/**
+ *	t3_update_qset_coalesce - update coalescing settings for a queue set
+ *	@qs: the SGE queue set
+ *	@p: new queue set parameters
+ *
+ *	Update the coalescing settings for an SGE queue set.  Nothing is done
+ *	if the queue set is not initialized yet.
+ */
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
+	if (!qs->netdev)
+		return;
+
+	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
+	qs->rspq.polling = p->polling;
+	qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
+}
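
The factor of 10 follows from the SGE timer tick: t3_sge_init() below
programs A_SG_TIMER_TICK to core_ticks_per_usec(adap) / 10, i.e. one tick
per 100 ns, so a holdoff of N microseconds is N * 10 ticks.  For the
default coalesce_usecs of 5 set in t3_sge_prep():

	holdoff_tmr = 5 us / (100 ns per tick) = 50 ticks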
+
+/**
+ *	t3_sge_alloc_qset - initialize an SGE queue set
+ *	@adapter: the adapter
+ *	@id: the queue set id
+ *	@nports: how many Ethernet ports will be using this queue set
+ *	@irq_vec_idx: the IRQ vector index for response queue interrupts
+ *	@p: configuration parameters for this queue set
+ *	@ntxq: number of Tx queues for the queue set
+ *	@netdev: net device associated with this queue set
+ *
+ *	Allocate resources and initialize an SGE queue set.  A queue set
+ *	comprises a response queue, two Rx free-buffer queues, and up to 3
+ *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
+ *	queue, offload queue, and control queue.
+ */
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+		      int irq_vec_idx, const struct qset_params *p,
+		      int ntxq, struct net_device *netdev)
+{
+	int i, ret = -ENOMEM;
+	struct sge_qset *q = &adapter->sge.qs[id];
+
+	init_qset_cntxt(q, id);
+	init_timer(&q->tx_reclaim_timer);
+	q->tx_reclaim_timer.data = (unsigned long)q;
+	q->tx_reclaim_timer.function = sge_timer_cb;
+
+	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
+				   sizeof(struct rx_desc),
+				   sizeof(struct rx_sw_desc),
+				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
+	if (!q->fl[0].desc)
+		goto err;
+
+	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
+				   sizeof(struct rx_desc),
+				   sizeof(struct rx_sw_desc),
+				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
+	if (!q->fl[1].desc)
+		goto err;
+
+	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
+				  sizeof(struct rsp_desc), 0,
+				  &q->rspq.phys_addr, NULL);
+	if (!q->rspq.desc)
+		goto err;
+
+	for (i = 0; i < ntxq; ++i) {
+		/*
+		 * The control queue always uses immediate data so does not
+		 * need to keep track of any sk_buffs.
+		 */
+		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
+					    sizeof(struct tx_desc), sz,
+					    &q->txq[i].phys_addr,
+					    &q->txq[i].sdesc);
+		if (!q->txq[i].desc)
+			goto err;
+
+		q->txq[i].gen = 1;
+		q->txq[i].size = p->txq_size[i];
+		spin_lock_init(&q->txq[i].lock);
+		skb_queue_head_init(&q->txq[i].sendq);
+	}
+
+	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
+		     (unsigned long)q);
+	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
+		     (unsigned long)q);
+
+	q->fl[0].gen = q->fl[1].gen = 1;
+	q->fl[0].size = p->fl_size;
+	q->fl[1].size = p->jumbo_size;
+
+	q->rspq.gen = 1;
+	q->rspq.size = p->rspq_size;
+	spin_lock_init(&q->rspq.lock);
+
+	q->txq[TXQ_ETH].stop_thres = nports *
+	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+
+	if (ntxq == 1) {
+		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
+		    sizeof(struct cpl_rx_pkt);
+		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
+		    sizeof(struct cpl_rx_pkt);
+	} else {
+		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
+		    sizeof(struct cpl_rx_data);
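+		/* fit the buffer plus skb_shared_info in a 16KB allocation */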
+		q->fl[1].buf_size = (16 * 1024) -
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	}
+
+	spin_lock(&adapter->sge.reg_lock);
+
+	/* FL threshold comparison uses < */
+	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
+				   q->rspq.phys_addr, q->rspq.size,
+				   q->fl[0].buf_size, 1, 0);
+	if (ret)
+		goto err_unlock;
+
+	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
+					  q->fl[i].phys_addr, q->fl[i].size,
+					  q->fl[i].buf_size, p->cong_thres, 1,
+					  0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+				 1, 0);
+	if (ret)
+		goto err_unlock;
+
+	if (ntxq > 1) {
+		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
+					 USE_GTS, SGE_CNTXT_OFLD, id,
+					 q->txq[TXQ_OFLD].phys_addr,
+					 q->txq[TXQ_OFLD].size, 0, 1, 0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	if (ntxq > 2) {
+		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
+					 SGE_CNTXT_CTRL, id,
+					 q->txq[TXQ_CTRL].phys_addr,
+					 q->txq[TXQ_CTRL].size,
+					 q->txq[TXQ_CTRL].token, 1, 0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	spin_unlock(&adapter->sge.reg_lock);
+	q->netdev = netdev;
+	t3_update_qset_coalesce(q, p);
+
+	/*
+	 * We use atalk_ptr as a backpointer to a qset.  If a device is
+	 * associated with multiple queue sets, only the first one sets
+	 * atalk_ptr.
+	 */
+	if (netdev->atalk_ptr == NULL)
+		netdev->atalk_ptr = q;
+
+	refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
+	refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
+
+	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+		     V_NEWTIMER(q->rspq.holdoff_tmr));
+
+	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+	return 0;
+
+err_unlock:
+	spin_unlock(&adapter->sge.reg_lock);
+err:
+	t3_free_qset(adapter, q);
+	return ret;
+}
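
A typical caller allocates one queue set per Ethernet port; a hedged sketch
of such a loop (error handling simplified; the qset parameter array and
port array names are assumed from elsewhere in this patch):

	for_each_port(adapter, i) {
		err = t3_sge_alloc_qset(adapter, i, 1, i,
					&adapter->params.sge.qset[i],
					SGE_TXQ_PER_SET, adapter->port[i]);
		if (err)
			goto out_free;
	}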
+
+/**
+ *	t3_free_sge_resources - free SGE resources
+ *	@adap: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i)
+		t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ *	t3_sge_start - enable SGE
+ *	@adap: the adapter
+ *
+ *	Enables the SGE for DMAs.  This is the last step in starting packet
+ *	transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ *	t3_sge_stop - disable SGE operation
+ *	@adap: the adapter
+ *
+ *	Disables the DMA engine.  This can be called in emergencies (e.g.,
+ *	from error interrupts) or from normal process context.  In the latter
+ *	case it also disables any pending queue restart tasklets.  Note that
+ *	if it is called in interrupt context it cannot disable the restart
+ *	tasklets, as it cannot wait; however, the tasklets will have no effect
+ *	since the doorbells are disabled and the driver will call this again
+ *	later from process context, at which time the tasklets will be stopped
+ *	if they are still running.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+	if (!in_interrupt()) {
+		int i;
+
+		for (i = 0; i < SGE_QSETS; ++i) {
+			struct sge_qset *qs = &adap->sge.qs[i];
+
+			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+		}
+	}
+}
+
+/**
+ *	t3_sge_init - initialize SGE
+ *	@adap: the adapter
+ *	@p: the SGE parameters
+ *
+ *	Performs the SGE initialization needed after every chip reset.
+ *	We do not initialize any of the queue sets here; instead the driver
+ *	top-level must request those individually.  We also do not enable DMA
+ *	here; that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+	    F_CQCRDTCTRL |
+	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+	ctrl |= F_EGRGENCTRL;
+#endif
+	if (adap->params.rev > 0) {
+		if (!(adap->flags & (USING_MSIX | USING_MSI)))
+			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+		ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
+	}
+	t3_write_reg(adap, A_SG_CONTROL, ctrl);
+	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+		     V_LORCQDRBTHRSH(512));
+	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
+	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+/**
+ *	t3_sge_prep - one-time SGE initialization
+ *	@adap: the associated adapter
+ *	@p: SGE parameters
+ *
+ *	Performs one-time initialization of SGE SW state.  This includes
+ *	determining defaults for the assorted SGE parameters, which
+ *	administrators can change until they are used to initialize the SGE.
+ */
+void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+{
+	int i;
+
+	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct qset_params *q = p->qset + i;
+
+		q->polling = adap->params.rev > 0;
+		q->coalesce_usecs = 5;
+		q->rspq_size = 1024;
+		q->fl_size = 4096;
+		q->jumbo_size = 512;
+		q->txq_size[TXQ_ETH] = 1024;
+		q->txq_size[TXQ_OFLD] = 1024;
+		q->txq_size[TXQ_CTRL] = 256;
+		q->cong_thres = 0;
+	}
+
+	spin_lock_init(&adap->sge.reg_lock);
+}
+
+/**
+ *	t3_get_desc - dump an SGE descriptor for debugging purposes
+ *	@qs: the queue set
+ *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
+ *	@idx: the descriptor index in the queue
+ *	@data: where to dump the descriptor contents
+ *
+ *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
+ *	size of the descriptor.
+ */
+int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
+		unsigned char *data)
+{
+	if (qnum >= 6)
+		return -EINVAL;
+
+	if (qnum < 3) {
+		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
+			return -EINVAL;
+		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
+		return sizeof(struct tx_desc);
+	}
+
+	if (qnum == 3) {
+		if (!qs->rspq.desc || idx >= qs->rspq.size)
+			return -EINVAL;
+		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
+		return sizeof(struct rsp_desc);
+	}
+
+	qnum -= 4;
+	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
+		return -EINVAL;
+	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
+	return sizeof(struct rx_desc);
+}
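
For example, a debug path could snapshot one Ethernet Tx descriptor as
follows (a sketch; qs and idx come from the caller):

	unsigned char buf[sizeof(struct tx_desc)];
	int len = t3_get_desc(qs, 0, idx, buf);	/* queue 0: TXQ_ETH */

	if (len < 0)
		return len;	/* -EINVAL: bad queue number or index */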

+ 251 - 0
drivers/net/cxgb3/sge_defs.h

@@ -0,0 +1,251 @@
+/*
+ * This file is automatically generated --- any changes will be lost.
+ */
+
+#ifndef _SGE_DEFS_H
+#define _SGE_DEFS_H
+
+#define S_EC_CREDITS    0
+#define M_EC_CREDITS    0x7FFF
+#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
+#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
+
+#define S_EC_GTS    15
+#define V_EC_GTS(x) ((x) << S_EC_GTS)
+#define F_EC_GTS    V_EC_GTS(1U)
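
Every field in this file follows the same pattern: S_ is the bit offset,
M_ the field mask, V_ packs a value, G_ extracts one, and F_ names a
single-bit flag.  For instance, with the two fields above:

	u32 w = V_EC_CREDITS(128) | F_EC_GTS;	/* pack fields */
	unsigned int credits = G_EC_CREDITS(w);	/* yields 128  */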
+
+#define S_EC_INDEX    16
+#define M_EC_INDEX    0xFFFF
+#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
+#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
+
+#define S_EC_SIZE    0
+#define M_EC_SIZE    0xFFFF
+#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
+#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
+
+#define S_EC_BASE_LO    16
+#define M_EC_BASE_LO    0xFFFF
+#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
+#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
+
+#define S_EC_BASE_HI    0
+#define M_EC_BASE_HI    0xF
+#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
+#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
+
+#define S_EC_RESPQ    4
+#define M_EC_RESPQ    0x7
+#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
+#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
+
+#define S_EC_TYPE    7
+#define M_EC_TYPE    0x7
+#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
+#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
+
+#define S_EC_GEN    10
+#define V_EC_GEN(x) ((x) << S_EC_GEN)
+#define F_EC_GEN    V_EC_GEN(1U)
+
+#define S_EC_UP_TOKEN    11
+#define M_EC_UP_TOKEN    0xFFFFF
+#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
+#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
+
+#define S_EC_VALID    31
+#define V_EC_VALID(x) ((x) << S_EC_VALID)
+#define F_EC_VALID    V_EC_VALID(1U)
+
+#define S_RQ_MSI_VEC    20
+#define M_RQ_MSI_VEC    0x3F
+#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
+#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
+
+#define S_RQ_INTR_EN    26
+#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
+#define F_RQ_INTR_EN    V_RQ_INTR_EN(1U)
+
+#define S_RQ_GEN    28
+#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
+#define F_RQ_GEN    V_RQ_GEN(1U)
+
+#define S_CQ_INDEX    0
+#define M_CQ_INDEX    0xFFFF
+#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
+#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
+
+#define S_CQ_SIZE    16
+#define M_CQ_SIZE    0xFFFF
+#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
+#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
+
+#define S_CQ_BASE_HI    0
+#define M_CQ_BASE_HI    0xFFFFF
+#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
+#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
+
+#define S_CQ_RSPQ    20
+#define M_CQ_RSPQ    0x3F
+#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
+#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
+
+#define S_CQ_ASYNC_NOTIF    26
+#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
+#define F_CQ_ASYNC_NOTIF    V_CQ_ASYNC_NOTIF(1U)
+
+#define S_CQ_ARMED    27
+#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
+#define F_CQ_ARMED    V_CQ_ARMED(1U)
+
+#define S_CQ_ASYNC_NOTIF_SOL    28
+#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
+#define F_CQ_ASYNC_NOTIF_SOL    V_CQ_ASYNC_NOTIF_SOL(1U)
+
+#define S_CQ_GEN    29
+#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
+#define F_CQ_GEN    V_CQ_GEN(1U)
+
+#define S_CQ_OVERFLOW_MODE    31
+#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
+#define F_CQ_OVERFLOW_MODE    V_CQ_OVERFLOW_MODE(1U)
+
+#define S_CQ_CREDITS    0
+#define M_CQ_CREDITS    0xFFFF
+#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
+#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
+
+#define S_CQ_CREDIT_THRES    16
+#define M_CQ_CREDIT_THRES    0x1FFF
+#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
+#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
+
+#define S_FL_BASE_HI    0
+#define M_FL_BASE_HI    0xFFFFF
+#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
+#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
+
+#define S_FL_INDEX_LO    20
+#define M_FL_INDEX_LO    0xFFF
+#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
+#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
+
+#define S_FL_INDEX_HI    0
+#define M_FL_INDEX_HI    0xF
+#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
+#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
+
+#define S_FL_SIZE    4
+#define M_FL_SIZE    0xFFFF
+#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
+#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
+
+#define S_FL_GEN    20
+#define V_FL_GEN(x) ((x) << S_FL_GEN)
+#define F_FL_GEN    V_FL_GEN(1U)
+
+#define S_FL_ENTRY_SIZE_LO    21
+#define M_FL_ENTRY_SIZE_LO    0x7FF
+#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
+#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
+
+#define S_FL_ENTRY_SIZE_HI    0
+#define M_FL_ENTRY_SIZE_HI    0x1FFFFF
+#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
+#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
+
+#define S_FL_CONG_THRES    21
+#define M_FL_CONG_THRES    0x3FF
+#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
+#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
+
+#define S_FL_GTS    31
+#define V_FL_GTS(x) ((x) << S_FL_GTS)
+#define F_FL_GTS    V_FL_GTS(1U)
+
+#define S_FLD_GEN1    31
+#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
+#define F_FLD_GEN1    V_FLD_GEN1(1U)
+
+#define S_FLD_GEN2    0
+#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
+#define F_FLD_GEN2    V_FLD_GEN2(1U)
+
+#define S_RSPD_TXQ1_CR    0
+#define M_RSPD_TXQ1_CR    0x7F
+#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
+#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
+
+#define S_RSPD_TXQ1_GTS    7
+#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
+#define F_RSPD_TXQ1_GTS    V_RSPD_TXQ1_GTS(1U)
+
+#define S_RSPD_TXQ2_CR    8
+#define M_RSPD_TXQ2_CR    0x7F
+#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
+#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
+
+#define S_RSPD_TXQ2_GTS    15
+#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
+#define F_RSPD_TXQ2_GTS    V_RSPD_TXQ2_GTS(1U)
+
+#define S_RSPD_TXQ0_CR    16
+#define M_RSPD_TXQ0_CR    0x7F
+#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
+#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
+
+#define S_RSPD_TXQ0_GTS    23
+#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
+#define F_RSPD_TXQ0_GTS    V_RSPD_TXQ0_GTS(1U)
+
+#define S_RSPD_EOP    24
+#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
+#define F_RSPD_EOP    V_RSPD_EOP(1U)
+
+#define S_RSPD_SOP    25
+#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
+#define F_RSPD_SOP    V_RSPD_SOP(1U)
+
+#define S_RSPD_ASYNC_NOTIF    26
+#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
+#define F_RSPD_ASYNC_NOTIF    V_RSPD_ASYNC_NOTIF(1U)
+
+#define S_RSPD_FL0_GTS    27
+#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
+#define F_RSPD_FL0_GTS    V_RSPD_FL0_GTS(1U)
+
+#define S_RSPD_FL1_GTS    28
+#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
+#define F_RSPD_FL1_GTS    V_RSPD_FL1_GTS(1U)
+
+#define S_RSPD_IMM_DATA_VALID    29
+#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
+#define F_RSPD_IMM_DATA_VALID    V_RSPD_IMM_DATA_VALID(1U)
+
+#define S_RSPD_OFFLOAD    30
+#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
+#define F_RSPD_OFFLOAD    V_RSPD_OFFLOAD(1U)
+
+#define S_RSPD_GEN1    31
+#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
+#define F_RSPD_GEN1    V_RSPD_GEN1(1U)
+
+#define S_RSPD_LEN    0
+#define M_RSPD_LEN    0x7FFFFFFF
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_FLQ    31
+#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
+#define F_RSPD_FLQ    V_RSPD_FLQ(1U)
+
+#define S_RSPD_GEN2    0
+#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
+#define F_RSPD_GEN2    V_RSPD_GEN2(1U)
+
+#define S_RSPD_INR_VEC    1
+#define M_RSPD_INR_VEC    0x7F
+#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
+#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
+
+#endif				/* _SGE_DEFS_H */

+ 1444 - 0
drivers/net/cxgb3/t3_cpl.h

@@ -0,0 +1,1444 @@
+/*
+ * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef T3_CPL_H
+#define T3_CPL_H
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+# include <asm/byteorder.h>
+#endif
+
+enum CPL_opcode {
+	CPL_PASS_OPEN_REQ = 0x1,
+	CPL_PASS_ACCEPT_RPL = 0x2,
+	CPL_ACT_OPEN_REQ = 0x3,
+	CPL_SET_TCB = 0x4,
+	CPL_SET_TCB_FIELD = 0x5,
+	CPL_GET_TCB = 0x6,
+	CPL_PCMD = 0x7,
+	CPL_CLOSE_CON_REQ = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_ABORT_REQ = 0xA,
+	CPL_ABORT_RPL = 0xB,
+	CPL_TX_DATA = 0xC,
+	CPL_RX_DATA_ACK = 0xD,
+	CPL_TX_PKT = 0xE,
+	CPL_RTE_DELETE_REQ = 0xF,
+	CPL_RTE_WRITE_REQ = 0x10,
+	CPL_RTE_READ_REQ = 0x11,
+	CPL_L2T_WRITE_REQ = 0x12,
+	CPL_L2T_READ_REQ = 0x13,
+	CPL_SMT_WRITE_REQ = 0x14,
+	CPL_SMT_READ_REQ = 0x15,
+	CPL_TX_PKT_LSO = 0x16,
+	CPL_PCMD_READ = 0x17,
+	CPL_BARRIER = 0x18,
+	CPL_TID_RELEASE = 0x1A,
+
+	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_ERROR = 0x21,
+	CPL_GET_TCB_RPL = 0x22,
+	CPL_L2T_WRITE_RPL = 0x23,
+	CPL_PCMD_READ_RPL = 0x24,
+	CPL_PCMD_RPL = 0x25,
+	CPL_PEER_CLOSE = 0x26,
+	CPL_RTE_DELETE_RPL = 0x27,
+	CPL_RTE_WRITE_RPL = 0x28,
+	CPL_RX_DDP_COMPLETE = 0x29,
+	CPL_RX_PHYS_ADDR = 0x2A,
+	CPL_RX_PKT = 0x2B,
+	CPL_RX_URG_NOTIFY = 0x2C,
+	CPL_SET_TCB_RPL = 0x2D,
+	CPL_SMT_WRITE_RPL = 0x2E,
+	CPL_TX_DATA_ACK = 0x2F,
+
+	CPL_ABORT_REQ_RSS = 0x30,
+	CPL_ABORT_RPL_RSS = 0x31,
+	CPL_CLOSE_CON_RPL = 0x32,
+	CPL_ISCSI_HDR = 0x33,
+	CPL_L2T_READ_RPL = 0x34,
+	CPL_RDMA_CQE = 0x35,
+	CPL_RDMA_CQE_READ_RSP = 0x36,
+	CPL_RDMA_CQE_ERR = 0x37,
+	CPL_RTE_READ_RPL = 0x38,
+	CPL_RX_DATA = 0x39,
+
+	CPL_ACT_OPEN_RPL = 0x40,
+	CPL_PASS_OPEN_RPL = 0x41,
+	CPL_RX_DATA_DDP = 0x42,
+	CPL_SMT_READ_RPL = 0x43,
+
+	CPL_ACT_ESTABLISH = 0x50,
+	CPL_PASS_ESTABLISH = 0x51,
+
+	CPL_PASS_ACCEPT_REQ = 0x70,
+
+	CPL_ASYNC_NOTIF = 0x80,	/* fake opcode for async notifications */
+
+	CPL_TX_DMA_ACK = 0xA0,
+	CPL_RDMA_READ_REQ = 0xA1,
+	CPL_RDMA_TERMINATE = 0xA2,
+	CPL_TRACE_PKT = 0xA3,
+	CPL_RDMA_EC_STATUS = 0xA5,
+
+	NUM_CPL_CMDS		/* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+	CPL_ERR_NONE = 0,
+	CPL_ERR_TCAM_PARITY = 1,
+	CPL_ERR_TCAM_FULL = 3,
+	CPL_ERR_CONN_RESET = 20,
+	CPL_ERR_CONN_EXIST = 22,
+	CPL_ERR_ARP_MISS = 23,
+	CPL_ERR_BAD_SYN = 24,
+	CPL_ERR_CONN_TIMEDOUT = 30,
+	CPL_ERR_XMIT_TIMEDOUT = 31,
+	CPL_ERR_PERSIST_TIMEDOUT = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_ABORT_FAILED = 42,
+	CPL_ERR_GENERAL = 99
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK = 1,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE = 0,
+	ULP_MODE_ISCSI = 2,
+	ULP_MODE_RDMA = 4,
+	ULP_MODE_TCPDDP = 5
+};
+
+enum {
+	ULP_CRC_HEADER = 1 << 0,
+	ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+	CPL_PASS_OPEN_ACCEPT,
+	CPL_PASS_OPEN_REJECT
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+	CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum {				/* TX_PKT_LSO ethernet types */
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+enum {				/* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+union opcode_tid {
+	__be32 opcode_tid;
+	__u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
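
Putting these together, a sketch of the round trip (the tid value is
arbitrary; every CPL command begins with a union opcode_tid after any
header):

	struct { union opcode_tid ot; } cmd;
	unsigned int tid, op;

	cmd.ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, 0x1234));
	tid = GET_TID(&cmd);			/* 0x1234 */
	op = G_OPCODE(ntohl(OPCODE_TID(&cmd)));	/* CPL_TID_RELEASE */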
+
+struct tcp_options {
+	__be16 mss;
+	__u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:5;
+	__u8 ecn:1;
+	__u8 sack:1;
+	__u8 tstamp:1;
+#else
+	__u8 tstamp:1;
+	__u8 sack:1;
+	__u8 ecn:1;
+	 __u8:5;
+#endif
+};
+
+struct rss_header {
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 cpu_idx:6;
+	__u8 hash_type:2;
+#else
+	__u8 hash_type:2;
+	__u8 cpu_idx:6;
+#endif
+	__be16 cq_idx;
+	__be32 rss_hash_val;
+};
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+};
+
+/* wr_hi fields */
+#define S_WR_SGE_CREDITS    0
+#define M_WR_SGE_CREDITS    0xFF
+#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
+#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
+
+#define S_WR_SGLSFLT    8
+#define M_WR_SGLSFLT    0xFF
+#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
+#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
+
+#define S_WR_BCNTLFLT    16
+#define M_WR_BCNTLFLT    0xF
+#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
+#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+
+#define S_WR_DATATYPE    20
+#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
+#define F_WR_DATATYPE    V_WR_DATATYPE(1U)
+
+#define S_WR_COMPL    21
+#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
+#define F_WR_COMPL    V_WR_COMPL(1U)
+
+#define S_WR_EOP    22
+#define V_WR_EOP(x) ((x) << S_WR_EOP)
+#define F_WR_EOP    V_WR_EOP(1U)
+
+#define S_WR_SOP    23
+#define V_WR_SOP(x) ((x) << S_WR_SOP)
+#define F_WR_SOP    V_WR_SOP(1U)
+
+#define S_WR_OP    24
+#define M_WR_OP    0xFF
+#define V_WR_OP(x) ((x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+/* wr_lo fields */
+#define S_WR_LEN    0
+#define M_WR_LEN    0xFF
+#define V_WR_LEN(x) ((x) << S_WR_LEN)
+#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
+
+#define S_WR_TID    8
+#define M_WR_TID    0xFFFFF
+#define V_WR_TID(x) ((x) << S_WR_TID)
+#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
+
+#define S_WR_CR_FLUSH    30
+#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
+#define F_WR_CR_FLUSH    V_WR_CR_FLUSH(1U)
+
+#define S_WR_GEN    31
+#define V_WR_GEN(x) ((x) << S_WR_GEN)
+#define F_WR_GEN    V_WR_GEN(1U)
+
+# define WR_HDR struct work_request_hdr wr
+# define RSS_HDR
+#else
+# define WR_HDR
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
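
In the driver build (CHELSIO_FW undefined) every command structure below
thus starts with a struct work_request_hdr, while replies carry no extra
prefix; a firmware build inverts this, prefixing replies with a struct
rss_header instead.  For example, cpl_pass_open_req effectively expands to:

	struct cpl_pass_open_req {
		struct work_request_hdr wr;	/* from WR_HDR */
		union opcode_tid ot;
		/* ... remaining fields unchanged ... */
	};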
+
+/* option 0 lower-half fields */
+#define S_CPL_STATUS    0
+#define M_CPL_STATUS    0xFF
+#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
+#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
+
+#define S_INJECT_TIMER    6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER    V_INJECT_TIMER(1U)
+
+#define S_NO_OFFLOAD    7
+#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
+#define F_NO_OFFLOAD    V_NO_OFFLOAD(1U)
+
+#define S_ULP_MODE    8
+#define M_ULP_MODE    0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ    12
+#define M_RCV_BUFSIZ    0x3FFF
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_TOS    26
+#define M_TOS    0x3F
+#define V_TOS(x) ((x) << S_TOS)
+#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
+
+/* option 0 upper-half fields */
+#define S_DELACK    0
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK    V_DELACK(1U)
+
+#define S_NO_CONG    1
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG    V_NO_CONG(1U)
+
+#define S_SRC_MAC_SEL    2
+#define M_SRC_MAC_SEL    0x3
+#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
+#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
+
+#define S_L2T_IDX    4
+#define M_L2T_IDX    0x7FF
+#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TX_CHANNEL    15
+#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
+#define F_TX_CHANNEL    V_TX_CHANNEL(1U)
+
+#define S_TCAM_BYPASS    16
+#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS    V_TCAM_BYPASS(1U)
+
+#define S_NAGLE    17
+#define V_NAGLE(x) ((x) << S_NAGLE)
+#define F_NAGLE    V_NAGLE(1U)
+
+#define S_WND_SCALE    18
+#define M_WND_SCALE    0xF
+#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE    22
+#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE    V_KEEP_ALIVE(1U)
+
+#define S_MAX_RETRANS    23
+#define M_MAX_RETRANS    0xF
+#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
+#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
+
+#define S_MAX_RETRANS_OVERRIDE    27
+#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
+#define F_MAX_RETRANS_OVERRIDE    V_MAX_RETRANS_OVERRIDE(1U)
+
+#define S_MSS_IDX    28
+#define M_MSS_IDX    0xF
+#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 1 fields */
+#define S_RSS_ENABLE    0
+#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
+#define F_RSS_ENABLE    V_RSS_ENABLE(1U)
+
+#define S_RSS_MASK_LEN    1
+#define M_RSS_MASK_LEN    0x7
+#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
+#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
+
+#define S_CPU_IDX    4
+#define M_CPU_IDX    0x3F
+#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
+#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+
+#define S_MAC_MATCH_VALID    18
+#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
+#define F_MAC_MATCH_VALID    V_MAC_MATCH_VALID(1U)
+
+#define S_CONN_POLICY    19
+#define M_CONN_POLICY    0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+#define S_SYN_DEFENSE    21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE    V_SYN_DEFENSE(1U)
+
+#define S_VLAN_PRI    22
+#define M_VLAN_PRI    0x3
+#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
+#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
+
+#define S_VLAN_PRI_VALID    24
+#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
+#define F_VLAN_PRI_VALID    V_VLAN_PRI_VALID(1U)
+
+#define S_PKT_TYPE    25
+#define M_PKT_TYPE    0x3
+#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
+#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
+
+#define S_MAC_MATCH    27
+#define M_MAC_MATCH    0x1F
+#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
+#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
+
+/* option 2 fields */
+#define S_CPU_INDEX    0
+#define M_CPU_INDEX    0x7F
+#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
+#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
+
+#define S_CPU_INDEX_VALID    7
+#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
+#define F_CPU_INDEX_VALID    V_CPU_INDEX_VALID(1U)
+
+#define S_RX_COALESCE    8
+#define M_RX_COALESCE    0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_RX_COALESCE_VALID    10
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID    V_RX_COALESCE_VALID(1U)
+
+#define S_CONG_CONTROL_FLAVOR    11
+#define M_CONG_CONTROL_FLAVOR    0x3
+#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
+#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
+
+#define S_PACING_FLAVOR    13
+#define M_PACING_FLAVOR    0x3
+#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
+#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
+
+#define S_FLAVORS_VALID    15
+#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
+#define F_FLAVORS_VALID    V_FLAVORS_VALID(1U)
+
+#define S_RX_FC_DISABLE    16
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE    V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_VALID    17
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID    V_RX_FC_VALID(1U)
+
+struct cpl_pass_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l;
+	__be32 peer_netmask;
+	__be32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__u8 resvd[7];
+	__u8 status;
+};
+
+struct cpl_pass_establish {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	__be16 l2t_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_tid fields */
+#define S_PASS_OPEN_TID    0
+#define M_PASS_OPEN_TID    0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS    24
+#define M_PASS_OPEN_TOS    0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.l2t_idx fields */
+#define S_L2T_IDX16    5
+#define M_L2T_IDX16    0x7FF
+#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
+#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
+#define G_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x)       (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
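
These extract from the 16-bit tcp_opt word of the establish messages; a
sketch of decoding it from a received cpl_pass_establish (pointer name
assumed):

	u16 opt = ntohs(p->tcp_opt);
	int tstamp_ok = G_TCPOPT_TSTAMP(opt);	/* timestamps negotiated? */
	unsigned int snd_wscale = G_TCPOPT_SND_WSCALE(opt);
	unsigned int mss_idx = G_TCPOPT_MSS(opt); /* 4-bit index, not bytes */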
+
+struct cpl_pass_accept_req {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	struct tcp_options tcp_options;
+	__u8 dst_mac[6];
+	__be16 vlan_tag;
+	__u8 src_mac[6];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:3;
+	__u8 addr_idx:3;
+	__u8 port_idx:1;
+	__u8 exact_match:1;
+#else
+	__u8 exact_match:1;
+	__u8 port_idx:1;
+	__u8 addr_idx:3;
+	 __u8:3;
+#endif
+	__u8 rsvd;
+	__be32 rcv_isn;
+	__be32 rsvd2;
+};
+
+struct cpl_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be32 rsvd;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l_status;
+};
+
+struct cpl_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l;
+	__be32 params;
+	__be32 opt2;
+};
+
+/* cpl_act_open_req.params fields */
+#define S_AOPEN_VLAN_PRI    9
+#define M_AOPEN_VLAN_PRI    0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID    11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID    V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE    12
+#define M_AOPEN_PKT_TYPE    0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH    14
+#define M_AOPEN_MAC_MATCH    0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID    19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID    V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN    20
+#define M_AOPEN_IFF_VLAN    0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 atid;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_act_establish {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	__be16 l2t_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 cpuno;
+	__be16 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd;
+	__u8 status;
+	__be16 len;
+};
+
+struct cpl_set_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 reply;
+	__u8 cpu_idx;
+	__be16 len;
+};
+
+/* cpl_set_tcb.reply fields */
+#define S_NO_REPLY    7
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY    V_NO_REPLY(1U)
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 reply;
+	__u8 cpu_idx;
+	__be16 word;
+	__be64 mask;
+	__be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_pcmd {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd[3];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 src:1;
+	__u8 bundle:1;
+	__u8 channel:1;
+	 __u8:5;
+#else
+	 __u8:5;
+	__u8 channel:1;
+	__u8 bundle:1;
+	__u8 src:1;
+#endif
+	__be32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_reply {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd;
+	__be16 len;
+};
+
+struct cpl_close_con_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+	__be32 snd_nxt;
+	__be32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+	__u8 cpu_idx;
+	__be16 rsvd1;
+};
+
+struct cpl_close_listserv_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_abort_req_rss {
+	RSS_HDR union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 status;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 cmd;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+	RSS_HDR union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 status;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 cmd;
+	__u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+	RSS_HDR union opcode_tid ot;
+	__be32 rcv_nxt;
+};
+
+struct tx_data_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__be32 len;
+	__be32 flags;
+	__be32 sndseq;
+	__be32 param;
+};
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT    0
+#define M_TX_PORT    0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS    4
+#define M_TX_MSS    0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS    8
+#define M_TX_QOS    0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+	union opcode_tid ot;
+	__be32 len;
+	__be32 rsvd;
+	__be16 urg;
+	__be16 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_ULP_SUBMODE    6
+#define M_TX_ULP_SUBMODE    0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE    10
+#define M_TX_ULP_MODE    0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE    14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE    V_TX_SHOVE(1U)
+
+#define S_TX_MORE    15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE    V_TX_MORE(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX    0
+#define M_TX_CPU_IDX    0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_URG    16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG    V_TX_URG(1U)
+
+#define S_TX_CLOSE    17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE    V_TX_CLOSE(1U)
+
+#define S_TX_INIT    18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT    V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK    19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK    V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA    20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA    V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+	RSS_HDR union opcode_tid ot;
+	__be32 ack_seq;
+};
+
+struct cpl_wr_ack {
+	RSS_HDR union opcode_tid ot;
+	__be16 credits;
+	__be16 rsvd;
+	__be32 snd_nxt;
+	__be32 snd_una;
+};
+
+struct cpl_rdma_ec_status {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct mngt_pktsched_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__u8 mngt_opcode;
+	__u8 rsvd[7];
+	__u8 sched;
+	__u8 idx;
+	__u8 min;
+	__u8 max;
+	__u8 binding;
+	__u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr {
+	RSS_HDR union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN    0
+#define M_ISCSI_PDU_LEN    0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP    15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP    V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+	RSS_HDR union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 dack_mode:2;
+	__u8 psh:1;
+	__u8 heartbeat:1;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 heartbeat:1;
+	__u8 psh:1;
+	__u8 dack_mode:2;
+#endif
+	__u8 status;
+};
+
+struct cpl_rx_data_ack {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define S_RX_CREDITS    0
+#define M_RX_CREDITS    0x7FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE    27
+#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
+#define F_RX_MODULATE    V_RX_MODULATE(1U)
+
+#define S_RX_FORCE_ACK    28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK    V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE    29
+#define M_RX_DACK_MODE    0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE    31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
+struct cpl_rx_urg_notify {
+	RSS_HDR union opcode_tid ot;
+	__be32 seq;
+};
+
+struct cpl_rx_ddp_complete {
+	RSS_HDR union opcode_tid ot;
+	__be32 ddp_report;
+};
+
+struct cpl_rx_data_ddp {
+	RSS_HDR union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	};
+	__be32 ulp_crc;
+	__be32 ddpvld_status;
+};
+
+/* cpl_rx_data_ddp.ddpvld_status fields */
+#define S_DDP_STATUS    0
+#define M_DDP_STATUS    0xFF
+#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
+#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
+
+#define S_DDP_VALID    15
+#define M_DDP_VALID    0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH    15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH    V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU    16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU    V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR    17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR    V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR    18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR    V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR    19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR    V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR    20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR    V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR    21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR    V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG    22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG    V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR    23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR    V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR    24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR    V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR    25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR    V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH    26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH    V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD    27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD    V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE    28
+#define M_DDP_ULP_MODE    0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_data_ddp.ddp_report fields */
+#define S_DDP_OFFSET    0
+#define M_DDP_OFFSET    0x3FFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_URG    24
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG    V_DDP_URG(1U)
+
+#define S_DDP_PSH    25
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH    V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE    26
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE    V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT    27
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT    V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_BUF_IDX    28
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX    V_DDP_BUF_IDX(1U)
+
+struct cpl_tx_pkt {
+	WR_HDR;
+	__be32 cntrl;
+	__be32 len;
+};
+
+struct cpl_tx_pkt_lso {
+	WR_HDR;
+	__be32 cntrl;
+	__be32 len;
+
+	__be32 rsvd;
+	__be32 lso_info;
+};
+
+/* cpl_tx_pkt*.cntrl fields */
+#define S_TXPKT_VLAN    0
+#define M_TXPKT_VLAN    0xFFFF
+#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_INTF    16
+#define M_TXPKT_INTF    0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_IPCSUM_DIS    20
+#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS    V_TXPKT_IPCSUM_DIS(1U)
+
+#define S_TXPKT_L4CSUM_DIS    21
+#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS    V_TXPKT_L4CSUM_DIS(1U)
+
+#define S_TXPKT_VLAN_VLD    22
+#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD    V_TXPKT_VLAN_VLD(1U)
+
+#define S_TXPKT_LOOPBACK    23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK    V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE    24
+#define M_TXPKT_OPCODE    0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_lso.lso_info fields */
+#define S_LSO_MSS    0
+#define M_LSO_MSS    0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_ETH_TYPE    14
+#define M_LSO_ETH_TYPE    0x3
+#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
+#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
+
+#define S_LSO_TCPHDR_WORDS    16
+#define M_LSO_TCPHDR_WORDS    0xF
+#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
+#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
+
+#define S_LSO_IPHDR_WORDS    20
+#define M_LSO_IPHDR_WORDS    0xF
+#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
+#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
+
+#define S_LSO_IPV6    24
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6    V_LSO_IPV6(1U)
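
A hedged sketch of filling these for a TSO packet (hypothetical helper;
interface 0 and plain Ethernet II framing assumed, header lengths in bytes):

	static void fill_lso(struct cpl_tx_pkt_lso *hdr, unsigned int mss,
			     unsigned int ip_hlen, unsigned int tcp_hlen)
	{
		hdr->cntrl = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_LSO) |
				   V_TXPKT_INTF(0));
		hdr->lso_info = htonl(V_LSO_MSS(mss) |
				      V_LSO_ETH_TYPE(CPL_ETH_II) |
				      V_LSO_IPHDR_WORDS(ip_hlen / 4) |
				      V_LSO_TCPHDR_WORDS(tcp_hlen / 4));
	}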
+
+struct cpl_trace_pkt {
+#ifdef CHELSIO_FW
+	__u8 rss_opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 err:1;
+	 __u8:7;
+#else
+	 __u8:7;
+	__u8 err:1;
+#endif
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 qid:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 qid:4;
+#endif
+	__be32 tstamp;
+#endif				/* CHELSIO_FW */
+
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 iff:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 iff:4;
+#endif
+	__u8 rsvd[4];
+	__be16 len;
+};
+
+struct cpl_rx_pkt {
+	RSS_HDR __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 iff:4;
+	__u8 csum_valid:1;
+	__u8 ipmi_pkt:1;
+	__u8 vlan_valid:1;
+	__u8 fragment:1;
+#else
+	__u8 fragment:1;
+	__u8 vlan_valid:1;
+	__u8 ipmi_pkt:1;
+	__u8 csum_valid:1;
+	__u8 iff:4;
+#endif
+	__be16 csum;
+	__be16 vlan;
+	__be16 len;
+};
+
+struct cpl_l2t_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+	__u8 rsvd[2];
+	__u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_IDX    0
+#define M_L2T_W_IDX    0x7FF
+#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
+#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
+
+#define S_L2T_W_VLAN    11
+#define M_L2T_W_VLAN    0xFFF
+#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
+#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
+
+#define S_L2T_W_IFF    23
+#define M_L2T_W_IFF    0xF
+#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
+#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
+
+#define S_L2T_W_PRIO    27
+#define M_L2T_W_PRIO    0x7
+#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
+#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
+
+struct cpl_l2t_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be32 params;
+	__u8 rsvd[2];
+	__u8 dst_mac[6];
+};
+
+/* cpl_l2t_read_rpl.params fields */
+#define S_L2T_R_PRIO    0
+#define M_L2T_R_PRIO    0x7
+#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
+#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
+
+#define S_L2T_R_VLAN    8
+#define M_L2T_R_VLAN    0xFFF
+#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
+#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
+
+#define S_L2T_R_IFF    20
+#define M_L2T_R_IFF    0xF
+#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
+#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
+
+#define S_L2T_STATUS    24
+#define M_L2T_STATUS    0xFF
+#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
+#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
+
+struct cpl_smt_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 mtu_idx:4;
+	__u8 iff:4;
+#else
+	__u8 iff:4;
+	__u8 mtu_idx:4;
+#endif
+	__be16 rsvd2;
+	__be16 rsvd3;
+	__u8 src_mac1[6];
+	__be16 rsvd4;
+	__u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:4;
+	__u8 iff:4;
+#else
+	__u8 iff:4;
+	 __u8:4;
+#endif
+	__be16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 mtu_idx:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 mtu_idx:4;
+#endif
+	__be16 rsvd2;
+	__be16 rsvd3;
+	__u8 src_mac1[6];
+	__be16 rsvd4;
+	__u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
+#define S_RTE_REQ_LUT_IX    8
+#define M_RTE_REQ_LUT_IX    0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE    19
+#define M_RTE_REQ_LUT_BASE    0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT    31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT    V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:6;
+	__u8 write_tcam:1;
+	__u8 write_l2t_lut:1;
+#else
+	__u8 write_l2t_lut:1;
+	__u8 write_tcam:1;
+	 __u8:6;
+#endif
+	__u8 rsvd[3];
+	__be32 lut_params;
+	__be16 rsvd2;
+	__be16 l2t_idx;
+	__be32 netmask;
+	__be32 faddr;
+};
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WRITE_REQ_LUT_IX    10
+#define M_RTE_WRITE_REQ_LUT_IX    0x7FF
+#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
+#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
+
+#define S_RTE_WRITE_REQ_LUT_BASE    21
+#define M_RTE_WRITE_REQ_LUT_BASE    0x7FF
+#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
+#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+struct cpl_rte_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd0;
+	__be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:7;
+	__u8 select:1;
+#else
+	__u8 select:1;
+	 __u8:7;
+#endif
+	__u8 rsvd2[3];
+	__be32 addr;
+};
+
+struct cpl_tid_release {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_barrier {
+	WR_HDR;
+	__u8 opcode;
+	__u8 rsvd[7];
+};
+
+struct cpl_rdma_read_req {
+	__u8 opcode;
+	__u8 rsvd[15];
+};
+
+struct cpl_rdma_terminate {
+#ifdef CHELSIO_FW
+	__u8 opcode;
+	__u8 rsvd[2];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 rspq:3;
+	 __u8:5;
+#else
+	 __u8:5;
+	__u8 rspq:3;
+#endif
+	__be32 tid_len;
+#endif
+	__be32 msn;
+	__be32 mo;
+	__u8 data[0];
+};
+
+/* cpl_rdma_terminate.tid_len fields */
+#define S_FLIT_CNT    0
+#define M_FLIT_CNT    0xFF
+#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
+#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
+
+#define S_TERM_TID    8
+#define M_TERM_TID    0xFFFFF
+#define V_TERM_TID(x) ((x) << S_TERM_TID)
+#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+#endif				/* T3_CPL_H */
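
To illustrate how the structs and field macros compose, a hypothetical helper packing the params word of a cpl_l2t_write_req could look like this (the arguments are placeholders and must fit their M_ masks; the htonl() is required because params is declared __be32):

	static inline __be32 l2t_write_params(unsigned int idx, unsigned int vlan,
					      unsigned int iff, unsigned int prio)
	{
		return htonl(V_L2T_W_IDX(idx) | V_L2T_W_VLAN(vlan) |
			     V_L2T_W_IFF(iff) | V_L2T_W_PRIO(prio));
	}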

+ 3375 - 0
drivers/net/cxgb3/t3_hw.c

@@ -0,0 +1,3375 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "firmware_exports.h"
+
+/**
+ *	t3_wait_op_done_val - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *	@valp: where to store the value of the register at completion time
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  If @valp is not NULL the value of the register
+ *	at the time it indicated completion is stored there.  Returns 0 if the
+ *	operation completes and -EAGAIN otherwise.
+ */
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			int polarity, int attempts, int delay, u32 *valp)
+{
+	while (1) {
+		u32 val = t3_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp)
+				*valp = val;
+			return 0;
+		}
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			udelay(delay);
+	}
+}
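
The rest of this file mostly polls through t3_wait_op_done(), which, judging by its uses below, is the same loop without capturing the final register value. A representative call, taken from the serial-flash helpers later in this file:

	/* poll A_SF_OP until F_BUSY deasserts, up to SF_ATTEMPTS tries,
	 * 10 us apart */
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);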
+
+/**
+ *	t3_write_regs - write a bunch of registers
+ *	@adapter: the adapter to program
+ *	@p: an array of register address/register value pairs
+ *	@n: the number of address/value pairs
+ *	@offset: register address offset
+ *
+ *	Takes an array of register address/register value pairs and writes each
+ *	value to the corresponding register.  Register addresses are adjusted
+ *	by the supplied offset.
+ */
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+		   int n, unsigned int offset)
+{
+	while (n--) {
+		t3_write_reg(adapter, p->reg_addr + offset, p->val);
+		p++;
+	}
+}
+
+/**
+ *	t3_set_reg_field - set a register field to a value
+ *	@adapter: the adapter to program
+ *	@addr: the register address
+ *	@mask: specifies the portion of the register to modify
+ *	@val: the new value for the register field
+ *
+ *	Sets a register field specified by the supplied mask to the
+ *	given value.
+ */
+void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+		      u32 val)
+{
+	u32 v = t3_read_reg(adapter, addr) & ~mask;
+
+	t3_write_reg(adapter, addr, v | val);
+	t3_read_reg(adapter, addr);	/* flush */
+}
+
+/**
+ *	t3_read_indirect - read indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect address
+ *	@data_reg: register holding the value of the indirect register
+ *	@vals: where the read register values are stored
+ *	@nregs: how many indirect registers to read
+ *	@start_idx: index of first indirect register to read
+ *
+ *	Reads registers that are accessed indirectly through an address/data
+ *	register pair.
+ */
+void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+		      unsigned int data_reg, u32 *vals, unsigned int nregs,
+		      unsigned int start_idx)
+{
+	while (nregs--) {
+		t3_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t3_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/**
+ *	t3_mc7_bd_read - read from MC7 through backdoor accesses
+ *	@mc7: identifies MC7 to read from
+ *	@start: index of first 64-bit word to read
+ *	@n: number of 64-bit words to read
+ *	@buf: where to store the read result
+ *
+ *	Read @n 64-bit words from MC7 starting at word @start, using backdoor
+ *	accesses.
+ */
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+		   u64 *buf)
+{
+	static const int shift[] = { 0, 0, 16, 24 };
+	static const int step[] = { 0, 32, 16, 8 };
+
+	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
+	struct adapter *adap = mc7->adapter;
+
+	if (start >= size64 || start + n > size64)
+		return -EINVAL;
+
+	start *= (8 << mc7->width);
+	while (n--) {
+		int i;
+		u64 val64 = 0;
+
+		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
+			int attempts = 10;
+			u32 val;
+
+			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
+			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
+			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
+			while ((val & F_BUSY) && attempts--)
+				val = t3_read_reg(adap,
+						  mc7->offset + A_MC7_BD_OP);
+			if (val & F_BUSY)
+				return -EIO;
+
+			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
+			if (mc7->width == 0) {
+				val64 = t3_read_reg(adap,
+						    mc7->offset +
+						    A_MC7_BD_DATA0);
+				val64 |= (u64) val << 32;
+			} else {
+				if (mc7->width > 1)
+					val >>= shift[mc7->width];
+				val64 |= (u64) val << (step[mc7->width] * i);
+			}
+			start += 8;
+		}
+		*buf++ = val64;
+	}
+	return 0;
+}
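
A usage sketch, dumping the first four 64-bit words of the PMRX memory (the mc7 instances hang off struct adapter, as the interrupt dispatch later in this file shows):

	u64 buf[4];
	int ret = t3_mc7_bd_read(&adapter->pmrx, 0, ARRAY_SIZE(buf), buf);
	if (ret)	/* -EINVAL if out of range, -EIO on a stuck BUSY bit */
		return ret;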
+
+/*
+ * Initialize MI1.
+ */
+static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+{
+	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
+	    V_CLKDIV(clkdiv);
+
+	if (!(ai->caps & SUPPORTED_10000baseT_Full))
+		val |= V_ST(1);
+	t3_write_reg(adap, A_MI1_CFG, val);
+}
+
+#define MDIO_ATTEMPTS 10
+
+/*
+ * MI1 read/write operations for direct-addressed PHYs.
+ */
+static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+		    int reg_addr, unsigned int *valp)
+{
+	int ret;
+	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+	if (mmd_addr)
+		return -EINVAL;
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+	if (!ret)
+		*valp = t3_read_reg(adapter, A_MI1_DATA);
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+		     int reg_addr, unsigned int val)
+{
+	int ret;
+	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+	if (mmd_addr)
+		return -EINVAL;
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_DATA, val);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+	mi1_read,
+	mi1_write
+};
+
+/*
+ * MI1 read/write operations for indirect-addressed PHYs.
+ */
+static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+			int reg_addr, unsigned int *valp)
+{
+	int ret;
+	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+	if (!ret) {
+		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+				      MDIO_ATTEMPTS, 20);
+		if (!ret)
+			*valp = t3_read_reg(adapter, A_MI1_DATA);
+	}
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int val)
+{
+	int ret;
+	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+	if (!ret) {
+		t3_write_reg(adapter, A_MI1_DATA, val);
+		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+				      MDIO_ATTEMPTS, 20);
+	}
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+	mi1_ext_read,
+	mi1_ext_write
+};
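
The two operation tables match the two MDIO addressing styles: mi1_mdio_ops serves directly addressed (clause 22 style) PHYs and rejects any nonzero mmd_addr, while mi1_mdio_ext_ops first issues an address cycle (V_MDI_OP(0)) and then the actual read or write, as indirectly addressed 10G (clause 45 style) PHYs require. Which table a board uses is chosen per adapter in t3_adap_info below. A sketch of an indirect read (the PHY address, MMD and register are illustrative):

	unsigned int val;
	int ret = mi1_ext_read(adapter, 8, 1, 0, &val);	/* phy 8, MMD 1, reg 0 */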
+
+/**
+ *	t3_mdio_change_bits - modify the value of a PHY register
+ *	@phy: the PHY to operate on
+ *	@mmd: the device address
+ *	@reg: the register address
+ *	@clear: what part of the register value to mask off
+ *	@set: what part of the register value to set
+ *
+ *	Changes the value of a PHY register by applying a mask to its current
+ *	value and ORing the result with a new value.
+ */
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+			unsigned int set)
+{
+	int ret;
+	unsigned int val;
+
+	ret = mdio_read(phy, mmd, reg, &val);
+	if (!ret) {
+		val &= ~clear;
+		ret = mdio_write(phy, mmd, reg, val | set);
+	}
+	return ret;
+}
+
+/**
+ *	t3_phy_reset - reset a PHY block
+ *	@phy: the PHY to operate on
+ *	@mmd: the device address of the PHY block to reset
+ *	@wait: how long to wait for the reset to complete in 1ms increments
+ *
+ *	Resets a PHY block and optionally waits for the reset to complete.
+ *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
+ *	for 10G PHYs.
+ */
+int t3_phy_reset(struct cphy *phy, int mmd, int wait)
+{
+	int err;
+	unsigned int ctl;
+
+	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
+	if (err || !wait)
+		return err;
+
+	do {
+		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
+		if (err)
+			return err;
+		ctl &= BMCR_RESET;
+		if (ctl)
+			msleep(1);
+	} while (ctl && --wait);
+
+	return ctl ? -1 : 0;
+}
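
For example, resetting a 10/100/1000 PHY and waiting up to roughly 100 ms for BMCR_RESET to self-clear (a sketch):

	err = t3_phy_reset(phy, 0, 100);	/* mmd 0 for 1G PHYs */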
+
+/**
+ *	t3_phy_advertise - set the PHY advertisement registers for autoneg
+ *	@phy: the PHY to operate on
+ *	@advert: bitmap of capabilities the PHY should advertise
+ *
+ *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
+ *	requested capabilities.
+ */
+int t3_phy_advertise(struct cphy *phy, unsigned int advert)
+{
+	int err;
+	unsigned int val = 0;
+
+	err = mdio_read(phy, 0, MII_CTRL1000, &val);
+	if (err)
+		return err;
+
+	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+	if (advert & ADVERTISED_1000baseT_Half)
+		val |= ADVERTISE_1000HALF;
+	if (advert & ADVERTISED_1000baseT_Full)
+		val |= ADVERTISE_1000FULL;
+
+	err = mdio_write(phy, 0, MII_CTRL1000, val);
+	if (err)
+		return err;
+
+	val = 1;
+	if (advert & ADVERTISED_10baseT_Half)
+		val |= ADVERTISE_10HALF;
+	if (advert & ADVERTISED_10baseT_Full)
+		val |= ADVERTISE_10FULL;
+	if (advert & ADVERTISED_100baseT_Half)
+		val |= ADVERTISE_100HALF;
+	if (advert & ADVERTISED_100baseT_Full)
+		val |= ADVERTISE_100FULL;
+	if (advert & ADVERTISED_Pause)
+		val |= ADVERTISE_PAUSE_CAP;
+	if (advert & ADVERTISED_Asym_Pause)
+		val |= ADVERTISE_PAUSE_ASYM;
+	return mdio_write(phy, 0, MII_ADVERTISE, val);
+}
+
+/**
+ *	t3_set_phy_speed_duplex - force PHY speed and duplex
+ *	@phy: the PHY to operate on
+ *	@speed: requested PHY speed
+ *	@duplex: requested PHY duplex
+ *
+ *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
+ *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
+ */
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	int err;
+	unsigned int ctl;
+
+	err = mdio_read(phy, 0, MII_BMCR, &ctl);
+	if (err)
+		return err;
+
+	if (speed >= 0) {
+		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+		if (speed == SPEED_100)
+			ctl |= BMCR_SPEED100;
+		else if (speed == SPEED_1000)
+			ctl |= BMCR_SPEED1000;
+	}
+	if (duplex >= 0) {
+		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+		if (duplex == DUPLEX_FULL)
+			ctl |= BMCR_FULLDPLX;
+	}
+	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+		ctl |= BMCR_ANENABLE;
+	return mdio_write(phy, 0, MII_BMCR, ctl);
+}
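
For instance, forcing 100 Mb/s full duplex clears BMCR_ANENABLE, whereas requesting SPEED_1000 keeps it set, per the GigE rule above (a sketch):

	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);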
+
+static const struct adapter_info t3_adap_info[] = {
+	{2, 0, 0, 0,
+	 F_GPIO2_OEN | F_GPIO4_OEN |
+	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+	 SUPPORTED_OFFLOAD,
+	 &mi1_mdio_ops, "Chelsio PE9000"},
+	{2, 0, 0, 0,
+	 F_GPIO2_OEN | F_GPIO4_OEN |
+	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+	 SUPPORTED_OFFLOAD,
+	 &mi1_mdio_ops, "Chelsio T302"},
+	{1, 0, 0, 0,
+	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+	 &mi1_mdio_ext_ops, "Chelsio T310"},
+	{2, 0, 0, 0,
+	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+	 &mi1_mdio_ext_ops, "Chelsio T320"},
+};
+
+/*
+ * Return the adapter_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct adapter_info *t3_get_adapter_info(unsigned int id)
+{
+	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+}
+
+#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
+		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
+#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
+
+static const struct port_type_info port_types[] = {
+	{NULL},
+	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+	 "10GBASE-XR"},
+	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+	 "10/100/1000BASE-T"},
+	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+	 "10/100/1000BASE-T"},
+	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+	{NULL, CAPS_10G, "10GBASE-KX4"},
+	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+	 "10GBASE-SR"},
+	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+};
+
+#undef CAPS_1G
+#undef CAPS_10G
+
+#define VPD_ENTRY(name, len) \
+	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure.  Includes only the ID and
+ * VPD-R sections.
+ */
+struct t3_vpd {
+	u8 id_tag;
+	u8 id_len[2];
+	u8 id_data[16];
+	u8 vpdr_tag;
+	u8 vpdr_len[2];
+	VPD_ENTRY(pn, 16);	/* part number */
+	VPD_ENTRY(ec, 16);	/* EC level */
+	VPD_ENTRY(sn, 16);	/* serial number */
+	VPD_ENTRY(na, 12);	/* MAC address base */
+	VPD_ENTRY(cclk, 6);	/* core clock */
+	VPD_ENTRY(mclk, 6);	/* mem clock */
+	VPD_ENTRY(uclk, 6);	/* uP clk */
+	VPD_ENTRY(mdc, 6);	/* MDIO clk */
+	VPD_ENTRY(mt, 2);	/* mem timing */
+	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
+	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
+	VPD_ENTRY(port0, 2);	/* PHY0 complex */
+	VPD_ENTRY(port1, 2);	/* PHY1 complex */
+	VPD_ENTRY(port2, 2);	/* PHY2 complex */
+	VPD_ENTRY(port3, 2);	/* PHY3 complex */
+	VPD_ENTRY(rv, 1);	/* csum */
+	u32 pad;		/* for multiple-of-4 sizing and alignment */
+};
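
For reference, each VPD_ENTRY(name, len) expands to the three parts of a PCI VPD-R keyword entry; for example VPD_ENTRY(sn, 16) becomes:

	u8 sn_kword[2];		/* the two-character keyword, e.g. "SN" */
	u8 sn_len;		/* data length byte */
	u8 sn_data[16];		/* the serial-number bytes */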
+
+#define EEPROM_MAX_POLL   4
+#define EEPROM_STAT_ADDR  0x4000
+#define VPD_BASE          0xc00
+
+/**
+ *	t3_seeprom_read - read a VPD EEPROM location
+ *	@adapter: adapter to read
+ *	@addr: EEPROM address
+ *	@data: where to store the read data
+ *
+ *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
+ *	VPD ROM capability.  A zero is written to the flag bit when the
+ *	address is written to the control register.  The hardware device will
+ *	set the flag to 1 when 4 bytes have been read into the data register.
+ */
+int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_POLL;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
+	do {
+		udelay(10);
+		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (!(val & PCI_VPD_ADDR_F)) {
+		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+		return -EIO;
+	}
+	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
+	*data = le32_to_cpu(*data);
+	return 0;
+}
+
+/**
+ *	t3_seeprom_write - write a VPD EEPROM location
+ *	@adapter: adapter to write
+ *	@addr: EEPROM address
+ *	@data: value to write
+ *
+ *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
+ *	VPD ROM capability.
+ */
+int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_POLL;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
+			       cpu_to_le32(data));
+	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
+			      addr | PCI_VPD_ADDR_F);
+	do {
+		msleep(1);
+		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+	} while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (val & PCI_VPD_ADDR_F) {
+		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ *	t3_seeprom_wp - enable/disable EEPROM write protection
+ *	@adapter: the adapter
+ *	@enable: 1 to enable write protection, 0 to disable it
+ *
+ *	Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(struct adapter *adapter, int enable)
+{
+	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
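
Putting the three EEPROM helpers together, a write sequence presumably has to lift write protection first (a sketch; addr must be 4-byte aligned, as both accessors check):

	ret = t3_seeprom_wp(adapter, 0);		/* unprotect */
	if (!ret)
		ret = t3_seeprom_write(adapter, addr, val);
	if (!ret)
		ret = t3_seeprom_wp(adapter, 1);	/* re-protect */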
+
+/*
+ * Convert a character holding a hex digit to a number.
+ */
+static unsigned int hex2int(unsigned char c)
+{
+	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
+}
+
+/**
+ *	get_vpd_params - read VPD parameters from VPD EEPROM
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	int i, addr, ret;
+	struct t3_vpd vpd;
+
+	/*
+	 * Card information is normally at VPD_BASE but some early cards had
+	 * it at 0.
+	 */
+	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
+	if (ret)
+		return ret;
+	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+
+	for (i = 0; i < sizeof(vpd); i += 4) {
+		ret = t3_seeprom_read(adapter, addr + i,
+				      (u32 *)((u8 *)&vpd + i));
+		if (ret)
+			return ret;
+	}
+
+	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
+	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
+	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
+	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+
+	/* Old eeproms didn't have port information */
+	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
+	} else {
+		p->port_type[0] = hex2int(vpd.port0_data[0]);
+		p->port_type[1] = hex2int(vpd.port1_data[0]);
+		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
+		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
+	}
+
+	for (i = 0; i < 6; i++)
+		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
+				 hex2int(vpd.na_data[2 * i + 1]);
+	return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
+	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
+	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE = 2,	/* program page */
+	SF_WR_DISABLE = 4,	/* disable writes */
+	SF_RD_STATUS = 5,	/* read status register */
+	SF_WR_ENABLE = 6,	/* enable writes */
+	SF_RD_DATA_FAST = 0xb,	/* read flash */
+	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
+
+	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
+	FW_VERS_ADDR = 0x77ffc	/* flash address holding FW version */
+};
+
+/**
+ *	sf1_read - read data from the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to read
+ *	@cont: whether another operation will be chained
+ *	@valp: where to store the read data
+ *
+ *	Reads up to 4 bytes of data from the serial flash.  The location of
+ *	the read needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		    u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return -EBUSY;
+	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+	if (!ret)
+		*valp = t3_read_reg(adapter, A_SF_DATA);
+	return ret;
+}
+
+/**
+ *	sf1_write - write data to the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to write
+ *	@cont: whether another operation will be chained
+ *	@val: value to write
+ *
+ *	Writes up to 4 bytes of data to the serial flash.  The location of
+ *	the write needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		     u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return -EBUSY;
+	t3_write_reg(adapter, A_SF_DATA, val);
+	t3_write_reg(adapter, A_SF_OP,
+		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+}
+
+/**
+ *	flash_wait_op - wait for a flash operation to complete
+ *	@adapter: the adapter
+ *	@attempts: max number of polls of the status register
+ *	@delay: delay between polls in ms
+ *
+ *	Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+			return ret;
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ *	t3_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+int t3_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
+		return ret;
+
+	for (; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, data);
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = htonl(*data);
+	}
+	return 0;
+}
+
+/**
+ *	t3_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.
+ */
+static int t3_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr + n > SF_SIZE || offset + n > 256)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
+		return ret;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, val);
+		if (ret)
+			return ret;
+	}
+	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+		return ret;
+
+	/* Read the page to verify the write succeeded */
+	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *) buf + offset, n))
+		return -EIO;
+	return 0;
+}
+
+enum fw_version_type {
+	FW_VERSION_N3,
+	FW_VERSION_T3
+};
+
+/**
+ *	t3_get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ *	t3_check_fw_version - check if the FW is compatible with this driver
+ *	@adapter: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(struct adapter *adapter)
+{
+	int ret;
+	u32 vers;
+	unsigned int type, major, minor;
+
+	ret = t3_get_fw_version(adapter, &vers);
+	if (ret)
+		return ret;
+
+	type = G_FW_VERSION_TYPE(vers);
+	major = G_FW_VERSION_MAJOR(vers);
+	minor = G_FW_VERSION_MINOR(vers);
+
+	if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+		return 0;
+
+	CH_ERR(adapter, "found wrong FW version (%u.%u), "
+	       "driver needs version 3.1\n", major, minor);
+	return -EINVAL;
+}
+
+/**
+ *	t3_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	while (start <= end) {
+		int ret;
+
+		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0,
+				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
+			return ret;
+		start++;
+	}
+	return 0;
+}
+
+/**
+ *	t3_load_fw - download firmware
+ *	@adapter: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
+ *	The FW image has the following sections: @size - 8 bytes of code and
+ *	data, followed by 4 bytes of FW version, followed by the 32-bit
+ *	1's complement checksum of the whole image.
+ */
+int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	unsigned int i;
+	const u32 *p = (const u32 *)fw_data;
+	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
+
+	if (size & 3)
+		return -EINVAL;
+	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+		return -EFBIG;
+
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+	if (csum != 0xffffffff) {
+		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+		       csum);
+		return -EINVAL;
+	}
+
+	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
+	if (ret)
+		goto out;
+
+	size -= 8;		/* trim off version and checksum */
+	for (addr = FW_FLASH_BOOT_ADDR; size;) {
+		unsigned int chunk_size = min(size, 256U);
+
+		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+		if (ret)
+			goto out;
+
+		addr += chunk_size;
+		fw_data += chunk_size;
+		size -= chunk_size;
+	}
+
+	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+out:
+	if (ret)
+		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
+	return ret;
+}
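
The image layout implied above: the last 8 bytes are the version word followed by a checksum chosen so that all big-endian 32-bit words of the image sum to 0xffffffff. A hypothetical image-preparation tool (image holds the __be32 words of the file, nwords their count; both names are placeholders) would therefore compute the final word as:

	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords - 1; i++)	/* every word but the checksum */
		sum += ntohl(image[i]);
	image[nwords - 1] = htonl(0xffffffff - sum);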
+
+#define CIM_CTL_BASE 0x2000
+
+/**
+ *	t3_cim_ctl_blk_read - read a block from CIM control region
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM control region
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM control region.
+ */
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+			unsigned int n, unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
+		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+				      0, 5, 2);
+		if (!ret)
+			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
+	}
+	return ret;
+}
+
+/**
+ *	t3_link_changed - handle interface link changes
+ *	@adapter: the adapter
+ *	@port_id: the port index that changed link state
+ *
+ *	Called when a port's link settings change to propagate the new values
+ *	to the associated PHY and MAC.  After performing the common tasks it
+ *	invokes an OS-specific handler.
+ */
+void t3_link_changed(struct adapter *adapter, int port_id)
+{
+	int link_ok, speed, duplex, fc;
+	struct port_info *pi = adap2pinfo(adapter, port_id);
+	struct cphy *phy = &pi->phy;
+	struct cmac *mac = &pi->mac;
+	struct link_config *lc = &pi->link_config;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+	    uses_xaui(adapter)) {
+		if (link_ok)
+			t3b_pcs_reset(mac);
+		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
+	}
+	lc->link_ok = link_ok;
+	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+	if (lc->requested_fc & PAUSE_AUTONEG)
+		fc &= lc->requested_fc;
+	else
+		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+		/* Set MAC speed, duplex, and flow control to match PHY. */
+		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
+		lc->fc = fc;
+	}
+
+	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+/**
+ *	t3_link_start - apply link configuration to MAC/PHY
+ *	@phy: the PHY to setup
+ *	@mac: the MAC to setup
+ *	@lc: the requested link configuration
+ *
+ *	Set up a port's MAC and PHY according to a desired link configuration.
+ *	- If the PHY can auto-negotiate first decide what to advertise, then
+ *	  enable/disable auto-negotiation as desired, and reset.
+ *	- If the PHY does not auto-negotiate just reset it.
+ *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *	  otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	lc->link_ok = 0;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
+		if (fc) {
+			lc->advertising |= ADVERTISED_Asym_Pause;
+			if (fc & PAUSE_RX)
+				lc->advertising |= ADVERTISED_Pause;
+		}
+		phy->ops->advertise(phy, lc->advertising);
+
+		if (lc->autoneg == AUTONEG_DISABLE) {
+			lc->speed = lc->requested_speed;
+			lc->duplex = lc->requested_duplex;
+			lc->fc = (unsigned char)fc;
+			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
+						   fc);
+			/* Also disables autoneg */
+			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+			phy->ops->reset(phy, 0);
+		} else
+			phy->ops->autoneg_enable(phy);
+	} else {
+		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
+		lc->fc = (unsigned char)fc;
+		phy->ops->reset(phy, 0);
+	}
+	return 0;
+}
+
+/**
+ *	t3_set_vlan_accel - control HW VLAN extraction
+ *	@adapter: the adapter
+ *	@ports: bitmap of adapter ports to operate on
+ *	@on: enable (1) or disable (0) HW VLAN extraction
+ *
+ *	Enables or disables HW extraction of VLAN tags for the given port.
+ */
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
+{
+	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
+			 ports << S_VLANEXTRACTIONENABLE,
+			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
+}
+
+struct intr_info {
+	unsigned int mask;	/* bits to check in interrupt status */
+	const char *msg;	/* message to print or NULL */
+	short stat_idx;		/* stat counter to increment or -1 */
+	unsigned short fatal:1;	/* whether the condition reported is fatal */
+};
+
+/**
+ *	t3_handle_intr_status - table driven interrupt handler
+ *	@adapter: the adapter that generated the interrupt
+ *	@reg: the interrupt status register to process
+ *	@mask: a mask to apply to the interrupt status
+ *	@acts: table of interrupt actions
+ *	@stats: statistics counters tracking interrupt occurrences
+ *
+ *	A table driven interrupt handler that applies a set of masks to an
+ *	interrupt status word and performs the corresponding actions if the
+ *	interrupts described by the mask have occurred.  The actions include
+ *	optionally printing a warning or alert message, and optionally
+ *	incrementing a stat counter.  The table is terminated by an entry
+ *	specifying mask 0.  Returns the number of fatal interrupt conditions.
+ */
+static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
+				 unsigned int mask,
+				 const struct intr_info *acts,
+				 unsigned long *stats)
+{
+	int fatal = 0;
+	unsigned int status = t3_read_reg(adapter, reg) & mask;
+
+	for (; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+		if (acts->fatal) {
+			fatal++;
+			CH_ALERT(adapter, "%s (0x%x)\n",
+				 acts->msg, status & acts->mask);
+		} else if (acts->msg)
+			CH_WARN(adapter, "%s (0x%x)\n",
+				acts->msg, status & acts->mask);
+		if (acts->stat_idx >= 0)
+			stats[acts->stat_idx]++;
+	}
+	if (status)		/* clear processed interrupts */
+		t3_write_reg(adapter, reg, status);
+	return fatal;
+}
+
+#define SGE_INTR_MASK (F_RSPQDISABLED)
+#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
+		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
+		       F_NFASRCHFAIL)
+#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
+#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
+		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
+#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
+			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
+			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
+			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
+			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
+			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
+#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
+			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
+			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
+			V_BISTERR(M_BISTERR) | F_PEXERR)
+#define ULPRX_INTR_MASK F_PARERR
+#define ULPTX_INTR_MASK 0
+#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
+			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
+			 F_ZERO_SWITCH_ERROR)
+#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
+		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
+		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
+		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
+#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
+			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
+			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
+#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
+			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
+			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
+#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
+		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
+		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
+		       V_MCAPARERRENB(M_MCAPARERRENB))
+#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
+		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
+		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
+		      F_MPS0 | F_CPL_SWITCH)
+
+/*
+ * Interrupt handler for the PCIX1 module.
+ */
+static void pci_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pcix1_intr_info[] = {
+		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
+		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
+		{F_RCVTARABT, "PCI received target abort", -1, 1},
+		{F_RCVMSTABT, "PCI received master abort", -1, 1},
+		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
+		{F_DETPARERR, "PCI detected parity error", -1, 1},
+		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
+		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
+		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
+		 1},
+		{F_DETCORECCERR, "PCI correctable ECC error",
+		 STAT_PCI_CORR_ECC, 0},
+		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
+		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+		 1},
+		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+		 1},
+		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+		 1},
+		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+		 "error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
+				  pcix1_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pcie_intr_info[] = {
+		{F_PEXERR, "PCI PEX error", -1, 1},
+		{F_UNXSPLCPLERRR,
+		 "PCI unexpected split completion DMA read error", -1, 1},
+		{F_UNXSPLCPLERRC,
+		 "PCI unexpected split completion DMA command error", -1, 1},
+		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
+		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
+		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
+		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+		 "PCI MSI-X table/PBA parity error", -1, 1},
+		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
+				  pcie_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info tp_intr_info[] = {
+		{0xffffff, "TP parity error", -1, 1},
+		{0x1000000, "TP out of Rx pages", -1, 1},
+		{0x2000000, "TP out of Tx pages", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
+				  tp_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cim_intr_info[] = {
+		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
+		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
+		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
+		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
+		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
+		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
+		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
+		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
+		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
+		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
+		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
+		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+				  cim_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulprx_intr_info[] = {
+		{F_PARERR, "ULP RX parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
+				  ulprx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulptx_intr_info[] = {
+		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+		 STAT_ULP_CH0_PBL_OOB, 0},
+		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+		 STAT_ULP_CH1_PBL_OOB, 0},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
+				  ulptx_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
+	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
+	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
+	F_ICSPI1_TX_FRAMING_ERROR)
+#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
+	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
+	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
+	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmtx_intr_info[] = {
+		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
+		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
+		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
+		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+		 "PMTX ispi parity error", -1, 1},
+		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+		 "PMTX ospi parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
+				  pmtx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
+	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
+	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
+	F_IESPI1_TX_FRAMING_ERROR)
+#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
+	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
+	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
+	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmrx_intr_info[] = {
+		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
+		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
+		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
+		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+		 "PMRX ispi parity error", -1, 1},
+		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+		 "PMRX ospi parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
+				  pmrx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cplsw_intr_info[] = {
+/*		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
+		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
+		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
+		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
+		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
+				  cplsw_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info mps_intr_info[] = {
+		{0x1ff, "MPS parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
+				  mps_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
+
+/*
+ * MC7 interrupt handler.
+ */
+static void mc7_intr_handler(struct mc7 *mc7)
+{
+	struct adapter *adapter = mc7->adapter;
+	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
+
+	if (cause & F_CE) {
+		mc7->stats.corr_err++;
+		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
+			"data 0x%x 0x%x 0x%x\n", mc7->name,
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
+	}
+
+	if (cause & F_UE) {
+		mc7->stats.uncorr_err++;
+		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
+			 "data 0x%x 0x%x 0x%x\n", mc7->name,
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
+	}
+
+	if (G_PE(cause)) {
+		mc7->stats.parity_err++;
+		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
+			 mc7->name, G_PE(cause));
+	}
+
+	if (cause & F_AE) {
+		u32 addr = 0;
+
+		if (adapter->params.rev > 0)
+			addr = t3_read_reg(adapter,
+					   mc7->offset + A_MC7_ERR_ADDR);
+		mc7->stats.addr_err++;
+		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
+			 mc7->name, addr);
+	}
+
+	if (cause & MC7_INTR_FATAL)
+		t3_fatal_err(adapter);
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
+}
+
+#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
+/*
+ * XGMAC interrupt handler.
+ */
+static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+{
+	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
+	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
+
+	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
+		mac->stats.tx_fifo_parity_err++;
+		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
+	}
+	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
+		mac->stats.rx_fifo_parity_err++;
+		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
+	}
+	if (cause & F_TXFIFO_UNDERRUN)
+		mac->stats.tx_fifo_urun++;
+	if (cause & F_RXFIFO_OVERFLOW)
+		mac->stats.rx_fifo_ovfl++;
+	if (cause & V_SERDES_LOS(M_SERDES_LOS))
+		mac->stats.serdes_signal_loss++;
+	if (cause & F_XAUIPCSCTCERR)
+		mac->stats.xaui_pcs_ctc_err++;
+	if (cause & F_XAUIPCSALIGNCHANGE)
+		mac->stats.xaui_pcs_align_change++;
+
+	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+	if (cause & XGM_INTR_FATAL)
+		t3_fatal_err(adap);
+	return cause != 0;
+}
+
+/*
+ * Interrupt handler for PHY events.
+ */
+int t3_phy_intr_handler(struct adapter *adapter)
+{
+	static const int intr_gpio_bits[] = { 8, 0x20 };
+
+	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+
+	for_each_port(adapter, i) {
+		if (cause & intr_gpio_bits[i]) {
+			struct cphy *phy = &adap2pinfo(adapter, i)->phy;
+			int phy_cause = phy->ops->intr_handler(phy);
+
+			if (phy_cause & cphy_cause_link_change)
+				t3_link_changed(adapter, i);
+			if (phy_cause & cphy_cause_fifo_error)
+				phy->fifo_errors++;
+		}
+	}
+
+	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
+	return 0;
+}
+
+/*
+ * T3 slow path (non-data) interrupt handler.
+ */
+int t3_slow_intr_handler(struct adapter *adapter)
+{
+	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
+
+	cause &= adapter->slow_intr_mask;
+	if (!cause)
+		return 0;
+	if (cause & F_PCIM0) {
+		if (is_pcie(adapter))
+			pcie_intr_handler(adapter);
+		else
+			pci_intr_handler(adapter);
+	}
+	if (cause & F_SGE3)
+		t3_sge_err_intr_handler(adapter);
+	if (cause & F_MC7_PMRX)
+		mc7_intr_handler(&adapter->pmrx);
+	if (cause & F_MC7_PMTX)
+		mc7_intr_handler(&adapter->pmtx);
+	if (cause & F_MC7_CM)
+		mc7_intr_handler(&adapter->cm);
+	if (cause & F_CIM)
+		cim_intr_handler(adapter);
+	if (cause & F_TP1)
+		tp_intr_handler(adapter);
+	if (cause & F_ULP2_RX)
+		ulprx_intr_handler(adapter);
+	if (cause & F_ULP2_TX)
+		ulptx_intr_handler(adapter);
+	if (cause & F_PM1_RX)
+		pmrx_intr_handler(adapter);
+	if (cause & F_PM1_TX)
+		pmtx_intr_handler(adapter);
+	if (cause & F_CPL_SWITCH)
+		cplsw_intr_handler(adapter);
+	if (cause & F_MPS0)
+		mps_intr_handler(adapter);
+	if (cause & F_MC5A)
+		t3_mc5_intr_handler(&adapter->mc5);
+	if (cause & F_XGMAC0_0)
+		mac_intr_handler(adapter, 0);
+	if (cause & F_XGMAC0_1)
+		mac_intr_handler(adapter, 1);
+	if (cause & F_T3DBG)
+		t3_os_ext_intr_handler(adapter);
+
+	/* Clear the interrupts just processed. */
+	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
+	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
+	return 1;
+}
+
+/**
+ *	t3_intr_enable - enable interrupts
+ *	@adapter: the adapter whose interrupts should be enabled
+ *
+ *	Enable interrupts by setting the interrupt enable registers of the
+ *	various HW modules and then enabling the top-level interrupt
+ *	concentrator.
+ */
+void t3_intr_enable(struct adapter *adapter)
+{
+	static const struct addr_val_pair intr_en_avp[] = {
+		{A_SG_INT_ENABLE, SGE_INTR_MASK},
+		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
+		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+		 MC7_INTR_MASK},
+		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+		 MC7_INTR_MASK},
+		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
+		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
+		{A_TP_INT_ENABLE, 0x3bfffff},
+		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
+		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
+		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
+		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
+	};
+
+	adapter->slow_intr_mask = PL_INTR_MASK;
+
+	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+
+	if (adapter->params.rev > 0) {
+		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
+			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
+		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
+			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
+			     F_PBL_BOUND_ERR_CH1);
+	} else {
+		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
+		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+	}
+
+	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
+		     adapter_info(adapter)->gpio_intr);
+	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
+		     adapter_info(adapter)->gpio_intr);
+	if (is_pcie(adapter))
+		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+	else
+		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
+	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
+	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
+}
+
+/**
+ *	t3_intr_disable - disable a card's interrupts
+ *	@adapter: the adapter whose interrupts should be disabled
+ *
+ *	Disable interrupts.  We only disable the top-level interrupt
+ *	concentrator and the SGE data interrupts.
+ */
+void t3_intr_disable(struct adapter *adapter)
+{
+	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
+	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
+	adapter->slow_intr_mask = 0;
+}
+
+/**
+ *	t3_intr_clear - clear all interrupts
+ *	@adapter: the adapter whose interrupts should be cleared
+ *
+ *	Clears all interrupts.
+ */
+void t3_intr_clear(struct adapter *adapter)
+{
+	static const unsigned int cause_reg_addr[] = {
+		A_SG_INT_CAUSE,
+		A_SG_RSPQ_FL_STATUS,
+		A_PCIX_INT_CAUSE,
+		A_MC7_INT_CAUSE,
+		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+		A_CIM_HOST_INT_CAUSE,
+		A_TP_INT_CAUSE,
+		A_MC5_DB_INT_CAUSE,
+		A_ULPRX_INT_CAUSE,
+		A_ULPTX_INT_CAUSE,
+		A_CPL_INTR_CAUSE,
+		A_PM1_TX_INT_CAUSE,
+		A_PM1_RX_INT_CAUSE,
+		A_MPS_INT_CAUSE,
+		A_T3DBG_INT_CAUSE,
+	};
+	unsigned int i;
+
+	/* Clear PHY and MAC interrupts for each port. */
+	for_each_port(adapter, i)
+		t3_port_intr_clear(adapter, i);
+
+	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
+		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+
+	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
+	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
+}
+
+/**
+ *	t3_port_intr_enable - enable port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts should be enabled
+ *
+ *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+void t3_port_intr_enable(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+	phy->ops->intr_enable(phy);
+}
+
+/**
+ *	t3_port_intr_disable - disable port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts should be disabled
+ *
+ *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+void t3_port_intr_disable(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+	phy->ops->intr_disable(phy);
+}
+
+/**
+ *	t3_port_intr_clear - clear port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts to clear
+ *
+ *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+void t3_port_intr_clear(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
+	phy->ops->intr_clear(phy);
+}
+
+/**
+ * 	t3_sge_write_context - write an SGE context
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@type: the context type
+ *
+ * 	Program an SGE context with the values already loaded in the
+ * 	CONTEXT_DATA0-3 registers.
+ */
+static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
+				unsigned int type)
+{
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, 5, 1);
+}
+
+/**
+ *	t3_sge_init_ecntxt - initialize an SGE egress context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@gts_enable: whether to enable GTS for the context
+ *	@type: the egress context type
+ *	@respq: associated response queue
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@token: uP token
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE egress context and make it ready for use.  If the
+ *	platform allows concurrent context operations, the caller is
+ *	responsible for appropriate locking.
+ */
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+		       enum sge_context_type type, int respq, u64 base_addr,
+		       unsigned int size, unsigned int token, int gen,
+		       unsigned int cidx)
+{
+	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
+
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
+		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
+		     V_EC_BASE_LO(base_addr & 0xffff));
+	base_addr >>= 16;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
+		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
+		     F_EC_VALID);
+	return t3_sge_write_context(adapter, id, F_EGRESS);
+}
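+
+/*
+ * Illustrative sketch (not part of the driver) of programming an Ethernet
+ * egress context.  All values are hypothetical and SGE_CNTXT_ETH is assumed
+ * from the sge_context_type enum; a real caller derives these from its
+ * queue set.  The DMA address must be 4K-aligned or the call fails with
+ * -EINVAL.
+ */
+#if 0	/* example only */
+static int example_setup_eth_txq(struct adapter *adap, dma_addr_t phys)
+{
+	/* context 0, GTS on, response queue 0, 1024 entries, uP token 0,
+	 * generation 1, consumer index 0 */
+	return t3_sge_init_ecntxt(adap, 0, 1, SGE_CNTXT_ETH, 0, phys,
+				  1024, 0, 1, 0);
+}
+#endif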
+
+/**
+ *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@gts_enable: whether to enable GTS for the context
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@bsize: size of each buffer for this queue
+ *	@cong_thres: threshold to signal congestion to upstream producers
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE free list context and make it ready for use.  The
+ *	caller is responsible for ensuring only one context operation occurs
+ *	at a time.
+ */
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+			int gts_enable, u64 base_addr, unsigned int size,
+			unsigned int bsize, unsigned int cong_thres, int gen,
+			unsigned int cidx)
+{
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
+		     V_FL_BASE_HI((u32) base_addr) |
+		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
+		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
+		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
+		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
+	return t3_sge_write_context(adapter, id, F_FREELIST);
+}
+
+/**
+ *	t3_sge_init_rspcntxt - initialize an SGE response queue context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@fl_thres: threshold for selecting the normal or jumbo free list
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE response queue context and make it ready for use.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+			 int irq_vec_idx, u64 base_addr, unsigned int size,
+			 unsigned int fl_thres, int gen, unsigned int cidx)
+{
+	unsigned int intr = 0;
+
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
+		     V_CQ_INDEX(cidx));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+	base_addr >>= 32;
+	if (irq_vec_idx >= 0)
+		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
+	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
+}
+
+/**
+ *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@rspq: response queue for async notifications
+ *	@ovfl_mode: CQ overflow mode
+ *	@credits: completion queue credits
+ *	@credit_thres: the credit threshold
+ *
+ *	Initialize an SGE completion queue context and make it ready for use.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+			unsigned int size, int rspq, int ovfl_mode,
+			unsigned int credits, unsigned int credit_thres)
+{
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
+		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
+		     V_CQ_CREDIT_THRES(credit_thres));
+	return t3_sge_write_context(adapter, id, F_CQ);
+}
+
+/**
+ *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
+ *	@adapter: the adapter
+ *	@id: the egress context id
+ *	@enable: enable (1) or disable (0) the context
+ *
+ *	Enable or disable an SGE egress context.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, 5, 1);
+}
+
+/**
+ *	t3_sge_disable_fl - disable an SGE free-buffer list
+ *	@adapter: the adapter
+ *	@id: the free list context id
+ *
+ *	Disable an SGE free-buffer list.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, 5, 1);
+}
+
+/**
+ *	t3_sge_disable_rspcntxt - disable an SGE response queue
+ *	@adapter: the adapter
+ *	@id: the response queue context id
+ *
+ *	Disable an SGE response queue.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, 5, 1);
+}
+
+/**
+ *	t3_sge_disable_cqcntxt - disable an SGE completion queue
+ *	@adapter: the adapter
+ *	@id: the completion queue context id
+ *
+ *	Disable an SGE completion queue.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, 5, 1);
+}
+
+/**
+ *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@op: the operation to perform
+ *
+ *	Perform the selected operation on an SGE completion queue context.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+		      unsigned int credits)
+{
+	u32 val;
+
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
+		     V_CONTEXT(id) | F_CQ);
+	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+				0, 5, 1, &val))
+		return -EIO;
+
+	if (op >= 2 && op < 7) {
+		if (adapter->params.rev > 0)
+			return G_CQ_INDEX(val);
+
+		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
+		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
+				    F_CONTEXT_CMD_BUSY, 0, 5, 1))
+			return -EIO;
+		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
+	}
+	return 0;
+}
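+
+/*
+ * Illustrative use of the operation above (the opcode constants come from
+ * the SGE command encoding and are not shown in this patch): for ops 2..6
+ * the return value doubles as the CQ index, so a caller can credit the
+ * queue and learn its current position in one call:
+ *
+ *	ret = t3_sge_cqcntxt_op(adap, id, op, credits);
+ *	if (ret < 0)
+ *		return ret;	(errno on failure)
+ *	cq_index = ret;
+ */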
+
+/**
+ * 	t3_sge_read_context - read an SGE context
+ * 	@type: the context type
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@data: holds the retrieved context
+ *
+ * 	Read an SGE context of the given type.  The caller is responsible for ensuring
+ * 	only one context operation occurs at a time.
+ */
+static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
+			       unsigned int id, u32 data[4])
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
+	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
+			    5, 1))
+		return -EIO;
+	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
+	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
+	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
+	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
+	return 0;
+}
+
+/**
+ * 	t3_sge_read_ecntxt - read an SGE egress context
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@data: holds the retrieved context
+ *
+ * 	Read an SGE egress context.  The caller is responsible for ensuring
+ * 	only one context operation occurs at a time.
+ */
+int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
+{
+	if (id >= 65536)
+		return -EINVAL;
+	return t3_sge_read_context(F_EGRESS, adapter, id, data);
+}
+
+/**
+ * 	t3_sge_read_cq - read an SGE CQ context
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@data: holds the retrieved context
+ *
+ * 	Read an SGE CQ context.  The caller is responsible for ensuring
+ * 	only one context operation occurs at a time.
+ */
+int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
+{
+	if (id >= 65536)
+		return -EINVAL;
+	return t3_sge_read_context(F_CQ, adapter, id, data);
+}
+
+/**
+ * 	t3_sge_read_fl - read an SGE free-list context
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@data: holds the retrieved context
+ *
+ * 	Read an SGE free-list context.  The caller is responsible for ensuring
+ * 	only one context operation occurs at a time.
+ */
+int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
+{
+	if (id >= SGE_QSETS * 2)
+		return -EINVAL;
+	return t3_sge_read_context(F_FREELIST, adapter, id, data);
+}
+
+/**
+ * 	t3_sge_read_rspq - read an SGE response queue context
+ * 	@adapter: the adapter
+ * 	@id: the context id
+ * 	@data: holds the retrieved context
+ *
+ * 	Read an SGE response queue context.  The caller is responsible for
+ * 	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
+{
+	if (id >= SGE_QSETS)
+		return -EINVAL;
+	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
+}
+
+/**
+ *	t3_config_rss - configure Rx packet steering
+ *	@adapter: the adapter
+ *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
+ *	@cpus: values for the CPU lookup table (0xff terminated)
+ *	@rspq: values for the response queue lookup table (0xffff terminated)
+ *
+ *	Programs the receive packet steering logic.  @cpus and @rspq provide
+ *	the values for the CPU and response queue lookup tables.  If they
+ *	provide fewer values than the size of the tables the supplied values
+ *	provide fewer values than the sizes of the tables, the supplied values
+ */
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+		   const u8 *cpus, const u16 *rspq)
+{
+	int i, j, cpu_idx = 0, q_idx = 0;
+
+	if (cpus)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			u32 val = i << 16;
+
+			for (j = 0; j < 2; ++j) {
+				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
+				if (cpus[cpu_idx] == 0xff)
+					cpu_idx = 0;
+			}
+			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
+		}
+
+	if (rspq)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+				     (i << 16) | rspq[q_idx++]);
+			if (rspq[q_idx] == 0xffff)
+				q_idx = 0;
+		}
+
+	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
+}
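+
+/*
+ * Illustrative sketch of the terminator convention documented above: both
+ * tables are shorter than RSS_TABLE_SIZE, so their entries repeat until the
+ * HW tables are full.  The flag combination passed in rss_config is
+ * illustrative only.
+ */
+#if 0	/* example only */
+static void example_rss_setup(struct adapter *adap)
+{
+	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };	/* 0xff terminates */
+	static const u16 rspq[] = { 0, 1, 0xffff };	/* 0xffff terminates */
+
+	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN, cpus, rspq);
+}
+#endif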
+
+/**
+ *	t3_read_rss - read the contents of the RSS tables
+ *	@adapter: the adapter
+ *	@lkup: holds the contents of the RSS lookup table
+ *	@map: holds the contents of the RSS map table
+ *
+ *	Reads the contents of the receive packet steering tables.
+ */
+int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
+{
+	int i;
+	u32 val;
+
+	if (lkup)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
+				     0xffff0000 | i);
+			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
+			if (!(val & 0x80000000))
+				return -EAGAIN;
+			*lkup++ = val;
+			*lkup++ = (val >> 8);
+		}
+
+	if (map)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+				     0xffff0000 | i);
+			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
+			if (!(val & 0x80000000))
+				return -EAGAIN;
+			*map++ = val;
+		}
+	return 0;
+}
+
+/**
+ *	t3_tp_set_offload_mode - put TP in NIC/offload mode
+ *	@adap: the adapter
+ *	@enable: 1 to select offload mode, 0 for regular NIC
+ *
+ *	Switches TP to NIC/offload mode.
+ */
+void t3_tp_set_offload_mode(struct adapter *adap, int enable)
+{
+	if (is_offload(adap) || !enable)
+		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
+				 V_NICMODE(!enable));
+}
+
+/**
+ *	pm_num_pages - calculate the number of pages of the payload memory
+ *	@mem_size: the size of the payload memory
+ *	@pg_size: the size of each payload memory page
+ *
+ *	Calculate the number of pages, each of the given size, that fit in a
+ *	memory of the specified size, respecting the HW requirement that the
+ *	number of pages must be a multiple of 24.
+ */
+static inline unsigned int pm_num_pages(unsigned int mem_size,
+					unsigned int pg_size)
+{
+	unsigned int n = mem_size / pg_size;
+
+	return n - n % 24;
+}
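+
+/*
+ * Worked example: a 64 MB channel with 64 KB pages gives n = 1024;
+ * 1024 % 24 = 16, so pm_num_pages() returns 1008, the largest multiple of
+ * 24 that fits.
+ */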
+
+#define mem_region(adap, start, size, reg) \
+	t3_write_reg((adap), A_ ## reg, (start)); \
+	start += size
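+
+/*
+ * Note that mem_region() expands to two statements with no do { } while (0)
+ * wrapper, so it must not be used as the unbraced body of an if/else.  The
+ * call sites in partition_mem() below all invoke it unconditionally.
+ */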
+
+/*
+ *	partition_mem - partition memory and configure TP memory settings
+ *	@adap: the adapter
+ *	@p: the TP parameters
+ *
+ *	Partitions context and payload memory and configures TP's memory
+ *	registers.
+ */
+static void partition_mem(struct adapter *adap, const struct tp_params *p)
+{
+	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
+	unsigned int timers = 0, timers_shift = 22;
+
+	if (adap->params.rev > 0) {
+		if (tids <= 16 * 1024) {
+			timers = 1;
+			timers_shift = 16;
+		} else if (tids <= 64 * 1024) {
+			timers = 2;
+			timers_shift = 18;
+		} else if (tids <= 256 * 1024) {
+			timers = 3;
+			timers_shift = 20;
+		}
+	}
+
+	t3_write_reg(adap, A_TP_PMM_SIZE,
+		     p->chan_rx_size | (p->chan_tx_size >> 16));
+
+	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
+	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
+	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
+	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
+			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
+
+	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
+	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
+	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
+
+	pstructs = p->rx_num_pgs + p->tx_num_pgs;
+	/* Add a bit of headroom and round down to a multiple of 24 */
+	pstructs += 48;
+	pstructs -= pstructs % 24;
+	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
+
+	m = tids * TCB_SIZE;
+	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
+	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
+	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
+	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
+	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
+	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
+	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
+
+	m = (m + 4095) & ~0xfff;
+	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
+	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
+
+	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
+	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
+	if (tids < m)
+		adap->params.mc5.nservers += m - tids;
+}
+
+static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
+				  u32 val)
+{
+	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+	t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+static void tp_config(struct adapter *adap, const struct tp_params *p)
+{
+	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
+		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
+		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
+	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
+		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
+		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
+		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
+		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
+	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
+			 F_IPV6ENABLE | F_NICMODE);
+	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
+	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
+	t3_set_reg_field(adap, A_TP_PARA_REG6,
+			 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
+			 0);
+
+	t3_set_reg_field(adap, A_TP_PC_CONFIG,
+			 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
+			 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
+			 F_RXCONGESTIONMODE);
+	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
+
+	if (adap->params.rev > 0) {
+		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
+		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
+				 F_TXPACEAUTO);
+		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
+		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+	} else
+		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
+	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
+}
+
+/* Desired TP timer resolution in usec */
+#define TP_TMR_RES 50
+
+/* TCP timer values in ms */
+#define TP_DACK_TIMER 50
+#define TP_RTO_MIN    250
+
+/**
+ *	tp_set_timers - set TP timing parameters
+ *	@adap: the adapter to set
+ *	@core_clk: the core clock frequency in Hz
+ *
+ *	Set TP's timing parameters, such as the various timer resolutions and
+ *	the TCP timer values.
+ */
+static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
+{
+	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
+	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
+	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
+	unsigned int tps = core_clk >> tre;
+
+	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
+		     V_DELAYEDACKRESOLUTION(dack_re) |
+		     V_TIMESTAMPRESOLUTION(tstamp_re));
+	t3_write_reg(adap, A_TP_DACK_TIMER,
+		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
+	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
+		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
+		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+		     V_KEEPALIVEMAX(9));
+
+#define SECONDS * tps
+
+	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
+	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
+	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
+	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
+	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
+	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
+	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
+	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
+	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
+
+#undef SECONDS
+}
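+
+/*
+ * The SECONDS macro above makes "7200 SECONDS" expand to "7200 * tps",
+ * i.e. a tick count at the resolution programmed into
+ * A_TP_TIMER_RESOLUTION, so the TCP timer writes read like wall-clock
+ * values.
+ */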
+
+/**
+ *	t3_tp_set_coalescing_size - set receive coalescing size
+ *	@adap: the adapter
+ *	@size: the receive coalescing size
+ *	@psh: whether a set PSH bit should deliver coalesced data
+ *
+ *	Set the receive coalescing size and PSH bit handling.
+ */
+int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
+{
+	u32 val;
+
+	if (size > MAX_RX_COALESCING_LEN)
+		return -EINVAL;
+
+	val = t3_read_reg(adap, A_TP_PARA_REG3);
+	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+	if (size) {
+		val |= F_RXCOALESCEENABLE;
+		if (psh)
+			val |= F_RXCOALESCEPSHEN;
+		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+	}
+	t3_write_reg(adap, A_TP_PARA_REG3, val);
+	return 0;
+}
+
+/**
+ *	t3_tp_set_max_rxsize - set the max receive size
+ *	@adap: the adapter
+ *	@size: the max receive size
+ *
+ *	Set TP's max receive size.  This is the limit that applies when
+ *	receive coalescing is disabled.
+ */
+void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+{
+	t3_write_reg(adap, A_TP_PARA_REG7,
+		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void __devinit init_mtus(unsigned short mtus[])
+{
+	/*
+	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
+	 * it can accommodate max-size TCP/IP headers when SACK and timestamps
+	 * are enabled and still have at least 8 bytes of payload.
+	 */
+	mtus[0] = 88;
+	mtus[1] = 256;
+	mtus[2] = 512;
+	mtus[3] = 576;
+	mtus[4] = 808;
+	mtus[5] = 1024;
+	mtus[6] = 1280;
+	mtus[7] = 1492;
+	mtus[8] = 1500;
+	mtus[9] = 2002;
+	mtus[10] = 2048;
+	mtus[11] = 4096;
+	mtus[12] = 4352;
+	mtus[13] = 8192;
+	mtus[14] = 9000;
+	mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ *	t3_load_mtus - write the MTU and congestion control HW tables
+ *	@adap: the adapter
+ *	@mtus: the unrestricted values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ *	@beta: the values for the congestion control beta parameter
+ *	@mtu_cap: the maximum permitted effective MTU
+ *
+ *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
+ *	Update the high-speed congestion control table with the supplied alpha,
+ * 	beta, and MTUs.
+ */
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+		  unsigned short alpha[NCCTRL_WIN],
+		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
+{
+	static const unsigned int avg_pkts[NCCTRL_WIN] = {
+		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+		28672, 40960, 57344, 81920, 114688, 163840, 229376
+	};
+
+	unsigned int i, w;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int mtu = min(mtus[i], mtu_cap);
+		unsigned int log2 = fls(mtu);
+
+		if (!(mtu & ((1 << log2) >> 2)))	/* round */
+			log2--;
+		t3_write_reg(adap, A_TP_MTU_TABLE,
+			     (i << 24) | (log2 << 16) | mtu);
+
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			unsigned int inc;
+
+			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+				  CC_MIN_INCR);
+
+			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+				     (w << 16) | (beta[w] << 13) | inc);
+		}
+	}
+}
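+
+/*
+ * Worked example of the rounding above: for mtu = 1500, fls() returns 11
+ * and bit 9 (value 512) of 1500 is clear, i.e. 1500 < 1536, the midpoint
+ * of [1024, 2048], so log2 drops to 10 and the entry is tagged with the
+ * nearer power of two, 1024.
+ */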
+
+/**
+ *	t3_read_hw_mtus - read the values in the HW MTU table
+ *	@adap: the adapter
+ *	@mtus: where to store the HW MTU values
+ *
+ *	Reads the HW MTU table.
+ */
+void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
+{
+	int i;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int val;
+
+		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
+		val = t3_read_reg(adap, A_TP_MTU_TABLE);
+		mtus[i] = val & 0x3fff;
+	}
+}
+
+/**
+ *	t3_get_cong_cntl_tab - reads the congestion control table
+ *	@adap: the adapter
+ *	@incr: where to store the alpha values
+ *
+ *	Reads the additive increments programmed into the HW congestion
+ *	control table.
+ */
+void t3_get_cong_cntl_tab(struct adapter *adap,
+			  unsigned short incr[NMTUS][NCCTRL_WIN])
+{
+	unsigned int mtu, w;
+
+	for (mtu = 0; mtu < NMTUS; ++mtu)
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			t3_write_reg(adap, A_TP_CCTRL_TABLE,
+				     0xffff0000 | (mtu << 5) | w);
+			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
+				       0x1fff;
+		}
+}
+
+/**
+ *	t3_tp_get_mib_stats - read TP's MIB counters
+ *	@adap: the adapter
+ *	@tps: holds the returned counter values
+ *
+ *	Reads TP's MIB counters into @tps.
+ */
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
+{
+	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
+			 sizeof(*tps) / sizeof(u32), 0);
+}
+
+#define ulp_region(adap, name, start, len) \
+	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
+	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
+		     (start) + (len) - 1); \
+	start += len
+
+#define ulptx_region(adap, name, start, len) \
+	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
+	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
+		     (start) + (len) - 1)
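+
+/*
+ * Like mem_region() above, these are multi-statement macros with no
+ * do { } while (0) wrapper; ulp_config() below uses them only as full
+ * statements, never as an unbraced if/else body.
+ */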
+
+static void ulp_config(struct adapter *adap, const struct tp_params *p)
+{
+	unsigned int m = p->chan_rx_size;
+
+	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
+	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
+	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
+	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
+	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
+	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
+	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
+	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
+}
+
+void t3_config_trace_filter(struct adapter *adapter,
+			    const struct trace_params *tp, int filter_index,
+			    int invert, int enable)
+{
+	u32 addr, key[4], mask[4];
+
+	key[0] = tp->sport | (tp->sip << 16);
+	key[1] = (tp->sip >> 16) | (tp->dport << 16);
+	key[2] = tp->dip;
+	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
+
+	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
+	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
+	mask[2] = tp->dip_mask;
+	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
+
+	if (invert)
+		key[3] |= (1 << 29);
+	if (enable)
+		key[3] |= (1 << 28);
+
+	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
+	tp_wr_indirect(adapter, addr++, key[0]);
+	tp_wr_indirect(adapter, addr++, mask[0]);
+	tp_wr_indirect(adapter, addr++, key[1]);
+	tp_wr_indirect(adapter, addr++, mask[1]);
+	tp_wr_indirect(adapter, addr++, key[2]);
+	tp_wr_indirect(adapter, addr++, mask[2]);
+	tp_wr_indirect(adapter, addr++, key[3]);
+	tp_wr_indirect(adapter, addr, mask[3]);
+	t3_read_reg(adapter, A_TP_PIO_DATA);
+}
+
+/**
+ *	t3_config_sched - configure a HW traffic scheduler
+ *	@adap: the adapter
+ *	@kbps: target rate in Kbps
+ *	@sched: the scheduler index
+ *
+ *	Configure a HW scheduler for the target rate.
+ */
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
+{
+	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+	unsigned int clk = adap->params.vpd.cclk * 1000;
+	unsigned int selected_cpt = 0, selected_bpt = 0;
+
+	if (kbps > 0) {
+		kbps *= 125;	/* -> bytes */
+		for (cpt = 1; cpt <= 255; cpt++) {
+			tps = clk / cpt;
+			bpt = (kbps + tps / 2) / tps;
+			if (bpt > 0 && bpt <= 255) {
+				v = bpt * tps;
+				delta = v >= kbps ? v - kbps : kbps - v;
+				if (delta <= mindelta) {
+					mindelta = delta;
+					selected_cpt = cpt;
+					selected_bpt = bpt;
+				}
+			} else if (selected_cpt)
+				break;
+		}
+		if (!selected_cpt)
+			return -EINVAL;
+	}
+	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
+		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+	else
+		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+	return 0;
+}
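+
+/*
+ * Worked example (assuming, say, a 200 MHz core clock): a 1 Gb/s target is
+ * kbps * 125 = 125000000 bytes/s.  With cpt = 8 the scheduler ticks
+ * 25000000 times/s, so bpt = 5 bytes/tick hits the rate exactly; the loop
+ * keeps whichever (cpt, bpt) pair minimizes the error, preferring coarser
+ * pairs on ties.
+ */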
+
+static int tp_init(struct adapter *adap, const struct tp_params *p)
+{
+	int busy = 0;
+
+	tp_config(adap, p);
+	t3_set_vlan_accel(adap, 3, 0);
+
+	if (is_offload(adap)) {
+		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
+		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
+		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
+				       0, 1000, 5);
+		if (busy)
+			CH_ERR(adap, "TP initialization timed out\n");
+	}
+
+	if (!busy)
+		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
+	return busy;
+}
+
+int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
+{
+	if (port_mask & ~((1 << adap->params.nports) - 1))
+		return -EINVAL;
+	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
+			 port_mask << S_PORT0ACTIVE);
+	return 0;
+}
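+
+/*
+ * For example, t3_mps_set_active_ports(adap, 0x3) marks both ports of a
+ * two-port card active, while a mask with bits beyond the card's port
+ * count is rejected with -EINVAL.
+ */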
+
+/*
+ * Perform the bits of HW initialization that are dependent on the number
+ * of available ports.
+ */
+static void init_hw_for_avail_ports(struct adapter *adap, int nports)
+{
+	int i;
+
+	if (nports == 1) {
+		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
+		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
+		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
+			     F_PORT0ACTIVE | F_ENFORCEPKT);
+		t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
+	} else {
+		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
+		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
+		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
+			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
+		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
+			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
+			     F_ENFORCEPKT);
+		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
+		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
+		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
+		for (i = 0; i < 16; i++)
+			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
+				     (i << 16) | 0x1010);
+	}
+}
+
+static int calibrate_xgm(struct adapter *adapter)
+{
+	if (uses_xaui(adapter)) {
+		unsigned int v, i;
+
+		for (i = 0; i < 5; ++i) {
+			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
+			t3_read_reg(adapter, A_XGM_XAUI_IMP);
+			msleep(1);
+			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
+			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
+				t3_write_reg(adapter, A_XGM_XAUI_IMP,
+					     V_XAUIIMP(G_CALIMP(v) >> 2));
+				return 0;
+			}
+		}
+		CH_ERR(adapter, "MAC calibration failed\n");
+		return -1;
+	} else {
+		t3_write_reg(adapter, A_XGM_RGMII_IMP,
+			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+				 F_XGM_IMPSETUPDATE);
+	}
+	return 0;
+}
+
+static void calibrate_xgm_t3b(struct adapter *adapter)
+{
+	if (!uses_xaui(adapter)) {
+		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
+			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
+				 F_XGM_IMPSETUPDATE);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+				 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
+	}
+}
+
+struct mc7_timing_params {
+	unsigned char ActToPreDly;
+	unsigned char ActToRdWrDly;
+	unsigned char PreCyc;
+	unsigned char RefCyc[5];
+	unsigned char BkCyc;
+	unsigned char WrToRdDly;
+	unsigned char RdToWrDly;
+};
+
+/*
+ * Write a value to a register and check that the write completed.  These
+ * writes normally complete in a cycle or two, so one read should suffice.
+ * The very first read exists to flush the posted write to the device.
+ */
+static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
+{
+	t3_write_reg(adapter, addr, val);
+	t3_read_reg(adapter, addr);	/* flush */
+	if (!(t3_read_reg(adapter, addr) & F_BUSY))
+		return 0;
+	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
+	return -EIO;
+}
+
+static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
+{
+	static const unsigned int mc7_mode[] = {
+		0x632, 0x642, 0x652, 0x432, 0x442
+	};
+	static const struct mc7_timing_params mc7_timings[] = {
+		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
+		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
+		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
+		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
+		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
+	};
+
+	u32 val;
+	unsigned int width, density, slow, attempts;
+	struct adapter *adapter = mc7->adapter;
+	const struct mc7_timing_params *p = &mc7_timings[mem_type];
+
+	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+	slow = val & F_SLOW;
+	width = G_WIDTH(val);
+	density = G_DEN(val);
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
+	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
+	msleep(1);
+
+	if (!slow) {
+		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
+		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+		msleep(1);
+		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
+		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
+			CH_ERR(adapter, "%s MC7 calibration timed out\n",
+			       mc7->name);
+			goto out_fail;
+		}
+	}
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
+		     V_ACTTOPREDLY(p->ActToPreDly) |
+		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
+		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
+		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
+		     val | F_CLKEN | F_TERM150);
+	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
+
+	if (!slow)
+		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
+				 F_DLLENB);
+	udelay(1);
+
+	val = slow ? 3 : 6;
+	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+		goto out_fail;
+
+	if (!slow) {
+		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
+		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
+		udelay(5);
+	}
+
+	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
+		       mc7_mode[mem_type]) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+		goto out_fail;
+
+	/*
+	 * mc7_clock is in kHz; derive the number of clock ticks in one
+	 * 7812.5 ns (7.8125 us) refresh interval.
+	 */
+	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* x 7812.5 */
+	mc7_clock /= 1000000;	/* kHz * ns -> ticks */
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
+		     F_PERREFEN | V_PREREFDIV(mc7_clock));
+	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
+		     (mc7->size << width) - 1);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
+	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
+
+	attempts = 50;
+	do {
+		msleep(250);
+		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
+	} while ((val & F_BUSY) && --attempts);
+	if (val & F_BUSY) {
+		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
+		goto out_fail;
+	}
+
+	/* Enable normal memory accesses. */
+	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
+	return 0;
+
+out_fail:
+	return -1;
+}
+
+static void config_pcie(struct adapter *adap)
+{
+	static const u16 ack_lat[4][6] = {
+		{237, 416, 559, 1071, 2095, 4143},
+		{128, 217, 289, 545, 1057, 2081},
+		{73, 118, 154, 282, 538, 1050},
+		{67, 107, 86, 150, 278, 534}
+	};
+	static const u16 rpl_tmr[4][6] = {
+		{711, 1248, 1677, 3213, 6285, 12429},
+		{384, 651, 867, 1635, 3171, 6243},
+		{219, 354, 462, 846, 1614, 3150},
+		{201, 321, 258, 450, 834, 1602}
+	};
+
+	u16 val;
+	unsigned int log2_width, pldsize;
+	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
+
+	pci_read_config_word(adap->pdev,
+			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
+			     &val);
+	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+	pci_read_config_word(adap->pdev,
+			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
+			     &val);
+
+	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
+	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
+	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+	log2_width = fls(adap->params.pci.width) - 1;
+	acklat = ack_lat[log2_width][pldsize];
+	if (val & 1)		/* check LOsEnable */
+		acklat += fst_trn_tx * 4;
+	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+	if (adap->params.rev == 0)
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+				 V_T3A_ACKLAT(M_T3A_ACKLAT),
+				 V_T3A_ACKLAT(acklat));
+	else
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+				 V_ACKLAT(acklat));
+
+	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+			 V_REPLAYLMT(rpllmt));
+
+	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
+}
+
+/*
+ * Initialize and configure T3 HW modules.  This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to the FW and their values are platform-dependent.
+ * Only the top 8 bits are available for use; the rest must be 0.
+ */
+int t3_init_hw(struct adapter *adapter, u32 fw_params)
+{
+	int err = -EIO, attempts = 100;
+	const struct vpd_params *vpd = &adapter->params.vpd;
+
+	if (adapter->params.rev > 0)
+		calibrate_xgm_t3b(adapter);
+	else if (calibrate_xgm(adapter))
+		goto out_err;
+
+	if (vpd->mclk) {
+		partition_mem(adapter, &adapter->params.tp);
+
+		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+				adapter->params.mc5.nfilters,
+				adapter->params.mc5.nroutes))
+			goto out_err;
+	}
+
+	if (tp_init(adapter, &adapter->params.tp))
+		goto out_err;
+
+	t3_tp_set_coalescing_size(adapter,
+				  min(adapter->params.sge.max_pkt_size,
+				      MAX_RX_COALESCING_LEN), 1);
+	t3_tp_set_max_rxsize(adapter,
+			     min(adapter->params.sge.max_pkt_size, 16384U));
+	ulp_config(adapter, &adapter->params.tp);
+
+	if (is_pcie(adapter))
+		config_pcie(adapter);
+	else
+		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
+
+	t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
+	init_hw_for_avail_ports(adapter, adapter->params.nports);
+	t3_sge_init(adapter, &adapter->params.sge);
+
+	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+	t3_write_reg(adapter, A_CIM_BOOT_CFG,
+		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
+
+	do {			/* wait for uP to initialize */
+		msleep(20);
+	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+	if (!attempts)
+		goto out_err;
+
+	err = 0;
+out_err:
+	return err;
+}
+
+/**
+ *	get_pci_mode - determine a card's PCI mode
+ *	@adapter: the adapter
+ *	@p: where to store the PCI settings
+ *
+ *	Determines a card's PCI mode and associated parameters, such as speed
+ *	and width.
+ */
+static void __devinit get_pci_mode(struct adapter *adapter,
+				   struct pci_params *p)
+{
+	static const unsigned short speed_map[] = { 33, 66, 100, 133 };
+	u32 pci_mode, pcie_cap;
+
+	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap) {
+		u16 val;
+
+		p->variant = PCI_VARIANT_PCIE;
+		p->pcie_cap_addr = pcie_cap;
+		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+					&val);
+		p->width = (val >> 4) & 0x3f;
+		return;
+	}
+
+	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
+	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
+	p->width = (pci_mode & F_64BIT) ? 64 : 32;
+	pci_mode = G_PCIXINITPAT(pci_mode);
+	if (pci_mode == 0)
+		p->variant = PCI_VARIANT_PCI;
+	else if (pci_mode < 4)
+		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
+	else if (pci_mode < 8)
+		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
+	else
+		p->variant = PCI_VARIANT_PCIX_266_MODE2;
+}
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@ai: information about the current card
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/duplex/flow-control/autonegotiation
+ *	settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+				       unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = lc->speed = SPEED_INVALID;
+	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/**
+ *	mc7_calc_size - calculate MC7 memory size
+ *	@cfg: the MC7 configuration
+ *
+ *	Calculates the size of an MC7 memory in bytes from the value of its
+ *	configuration register.
+ */
+static unsigned int __devinit mc7_calc_size(u32 cfg)
+{
+	unsigned int width = G_WIDTH(cfg);
+	unsigned int banks = !!(cfg & F_BKS) + 1;
+	unsigned int org = !!(cfg & F_ORG) + 1;
+	unsigned int density = G_DEN(cfg);
+	unsigned int MBs = ((256 << density) * banks) / (org << width);
+
+	return MBs << 20;
+}
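+
+/*
+ * Worked example (field values hypothetical): with density = 1, two banks,
+ * org = 1 and width code 2, MBs = ((256 << 1) * 2) / (1 << 2) = 256, so
+ * mc7_calc_size() reports a 256 MB part.
+ */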
+
+static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+			       unsigned int base_addr, const char *name)
+{
+	u32 cfg;
+
+	mc7->adapter = adapter;
+	mc7->name = name;
+	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
+	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+	mc7->size = mc7_calc_size(cfg);
+	mc7->width = G_WIDTH(cfg);
+}
+
+void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
+{
+	mac->adapter = adapter;
+	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
+	mac->nucast = 1;
+
+	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
+		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
+			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
+		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
+				 F_ENRGMII, 0);
+	}
+}
+
+void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
+{
+	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+
+	mi1_init(adapter, ai);
+	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
+		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
+	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
+		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+
+	if (adapter->params.rev == 0 || !uses_xaui(adapter))
+		val |= F_ENRGMII;
+
+	/* Enable MAC clocks so we can access the registers */
+	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+
+	val |= F_CLKDIVRESET_;
+	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+}
+
+/*
+ * Reset the adapter.  PCIe cards lose their config space during reset, PCI-X
+ * ones don't.
+ */
+int t3_reset_adapter(struct adapter *adapter)
+{
+	int i;
+	uint16_t devid = 0;
+
+	if (is_pcie(adapter))
+		pci_save_state(adapter->pdev);
+	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
+
+	/*
+	 * Delay to give the device some time to reset fully.
+	 * XXX The delay duration should be tuned.
+	 */
+	for (i = 0; i < 10; i++) {
+		msleep(50);
+		pci_read_config_word(adapter->pdev, 0x00, &devid);
+		if (devid == 0x1425)
+			break;
+	}
+
+	if (devid != 0x1425)
+		return -1;
+
+	if (is_pcie(adapter))
+		pci_restore_state(adapter->pdev);
+	return 0;
+}
+
+/*
+ * Initialize adapter SW state for the various HW modules, set initial values
+ * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
+ * interface.
+ */
+int __devinit t3_prep_adapter(struct adapter *adapter,
+			      const struct adapter_info *ai, int reset)
+{
+	int ret;
+	unsigned int i, j = 0;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+
+	adapter->params.info = ai;
+	adapter->params.nports = ai->nports;
+	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
+	adapter->params.linkpoll_period = 0;
+	adapter->params.stats_update_period = is_10G(adapter) ?
+	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+	adapter->params.pci.vpd_cap_addr =
+	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return ret;
+
+	if (reset && t3_reset_adapter(adapter))
+		return -1;
+
+	t3_sge_prep(adapter, &adapter->params.sge);
+
+	if (adapter->params.vpd.mclk) {
+		struct tp_params *p = &adapter->params.tp;
+
+		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
+		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
+		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
+
+		p->nchan = ai->nports;
+		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
+		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
+		p->cm_size = t3_mc7_size(&adapter->cm);
+		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
+		p->chan_tx_size = p->pmtx_size / p->nchan;
+		p->rx_pg_size = 64 * 1024;
+		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
+		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
+		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
+		p->ntimer_qs = p->cm_size >= (128 << 20) ||
+		    adapter->params.rev > 0 ? 12 : 6;
+
+		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
+		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
+		    DEFAULT_NFILTERS : 0;
+		adapter->params.mc5.nroutes = 0;
+		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+
+		init_mtus(adapter->params.mtus);
+		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+	}
+
+	early_hw_init(adapter, ai);
+
+	for_each_port(adapter, i) {
+		u8 hw_addr[6];
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		while (!adapter->params.vpd.port_type[j])
+			++j;
+
+		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
+		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+				       ai->mdio_ops);
+		mac_prep(&p->mac, adapter, j);
+		++j;
+
+		/*
+		 * The VPD EEPROM stores the base Ethernet address for the
+		 * card.  A port's address is derived from the base by adding
+		 * the port's index to the base's low octet.
+		 */
+		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
+		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
+
+		memcpy(adapter->port[i]->dev_addr, hw_addr,
+		       ETH_ALEN);
+		memcpy(adapter->port[i]->perm_addr, hw_addr,
+		       ETH_ALEN);
+		init_link_config(&p->link_config, p->port_type->caps);
+		p->phy.ops->power_down(&p->phy, 1);
+		if (!(p->port_type->caps & SUPPORTED_IRQ))
+			adapter->params.linkpoll_period = 10;
+	}
+
+	return 0;
+}
+
+void t3_led_ready(struct adapter *adapter)
+{
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+			 F_GPIO0_OUT_VAL);
+}

+ 73 - 0
drivers/net/cxgb3/t3cdev.h

@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2006-2007 Chelsio Communications.  All rights reserved.
+ * Copyright (C) 2006-2007 Open Grid Computing, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T3CDEV_H_
+#define _T3CDEV_H_
+
+#include <linux/list.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <net/neighbour.h>
+
+#define T3CNAMSIZ 16
+
+/* Get the t3cdev associated with a net_device */
+#define T3CDEV(netdev) ((struct t3cdev *)(netdev)->priv)
+
+struct cxgb3_client;
+
+enum t3ctype {
+	T3A = 0,
+	T3B
+};
+
+struct t3cdev {
+	char name[T3CNAMSIZ];	/* T3C device name */
+	enum t3ctype type;
+	struct list_head ofld_dev_list;	/* for list linking */
+	struct net_device *lldev;	/* LL dev associated with T3C messages */
+	struct proc_dir_entry *proc_dir;	/* root of proc dir for this T3C */
+	int (*send)(struct t3cdev *dev, struct sk_buff *skb);
+	int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
+	int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
+	void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
+	void *priv;		/* driver private data */
+	void *l2opt;		/* optional layer 2 data */
+	void *l3opt;		/* optional layer 3 data */
+	void *l4opt;		/* optional layer 4 data */
+	void *ulp;		/* ulp stuff */
+};
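+
+/*
+ * Illustrative sketch (not part of this patch) of how an offload client
+ * might hand a packet to a t3cdev; only the callback shape comes from the
+ * struct above, the surrounding function is hypothetical.
+ */
+#if 0	/* example only */
+static int example_offload_send(struct net_device *netdev,
+				struct sk_buff *skb)
+{
+	struct t3cdev *tdev = T3CDEV(netdev);	/* netdev->priv is a t3cdev */
+
+	return tdev->send(tdev, skb);
+}
+#endif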
+
+#endif				/* _T3CDEV_H_ */

+ 39 - 0
drivers/net/cxgb3/version.h

@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb3"
+/* Driver version */
+#define DRV_VERSION "1.0"
+#endif				/* __CHELSIO_VERSION_H */
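DRV_DESC, DRV_NAME and DRV_VERSION exist so the boot banner, the PCI driver name and the ethtool driver info can all come from one place. A sketch of the typical consumer, with the printk format illustrative rather than taken from the driver:

/* Sketch, not part of the patch. */
#include <linux/module.h>
#include "version.h"

static int __init example_init(void)
{
	printk(KERN_INFO "%s, version %s\n", DRV_DESC, DRV_VERSION);
	return 0;
}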

+ 228 - 0
drivers/net/cxgb3/vsc8211.c

@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+
+/* VSC8211 PHY specific registers. */
+enum {
+	VSC8211_INTR_ENABLE = 25,
+	VSC8211_INTR_STATUS = 26,
+	VSC8211_AUX_CTRL_STAT = 28,
+};
+
+enum {
+	VSC_INTR_RX_ERR = 1 << 0,
+	VSC_INTR_MS_ERR = 1 << 1,	/* master/slave resolution error */
+	VSC_INTR_CABLE = 1 << 2,	/* cable impairment */
+	VSC_INTR_FALSE_CARR = 1 << 3,	/* false carrier */
+	VSC_INTR_MEDIA_CHG = 1 << 4,	/* AMS media change */
+	VSC_INTR_RX_FIFO = 1 << 5,	/* Rx FIFO over/underflow */
+	VSC_INTR_TX_FIFO = 1 << 6,	/* Tx FIFO over/underflow */
+	VSC_INTR_DESCRAMBL = 1 << 7,	/* descrambler lock-lost */
+	VSC_INTR_SYMBOL_ERR = 1 << 8,	/* symbol error */
+	VSC_INTR_NEG_DONE = 1 << 10,	/* autoneg done */
+	VSC_INTR_NEG_ERR = 1 << 11,	/* autoneg error */
+	VSC_INTR_LINK_CHG = 1 << 13,	/* link change */
+	VSC_INTR_ENABLE = 1 << 15,	/* interrupt enable */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+			   VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+		   VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR    0
+#define M_ACSR_ACTIPHY_TMR    0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED    3
+#define M_ACSR_SPEED    0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY.  This PHY completes reset immediately so we never wait.
+ */
+static int vsc8211_reset(struct cphy *cphy, int wait)
+{
+	return t3_phy_reset(cphy, 0, 0);
+}
+
+static int vsc8211_intr_enable(struct cphy *cphy)
+{
+	return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
+}
+
+static int vsc8211_intr_disable(struct cphy *cphy)
+{
+	return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
+}
+
+static int vsc8211_intr_clear(struct cphy *cphy)
+{
+	u32 val;
+
+	/* Clear PHY interrupts by reading the register. */
+	return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
+}
+
+static int vsc8211_autoneg_enable(struct cphy *cphy)
+{
+	return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+				   BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8211_autoneg_restart(struct cphy *cphy)
+{
+	return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+				   BMCR_ANRESTART);
+}
+
+static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
+				   int *speed, int *duplex, int *fc)
+{
+	unsigned int bmcr, status, lpa, adv;
+	int err, sp = -1, dplx = -1, pause = 0;
+
+	err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
+	if (!err)
+		err = mdio_read(cphy, 0, MII_BMSR, &status);
+	if (err)
+		return err;
+
+	if (link_ok) {
+		/*
+		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+		 * once more to get the current link state.
+		 */
+		if (!(status & BMSR_LSTATUS))
+			err = mdio_read(cphy, 0, MII_BMSR, &status);
+		if (err)
+			return err;
+		*link_ok = (status & BMSR_LSTATUS) != 0;
+	}
+	if (!(bmcr & BMCR_ANENABLE)) {
+		dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+		if (bmcr & BMCR_SPEED1000)
+			sp = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			sp = SPEED_100;
+		else
+			sp = SPEED_10;
+	} else if (status & BMSR_ANEGCOMPLETE) {
+		err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
+		if (err)
+			return err;
+
+		dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+		sp = G_ACSR_SPEED(status);
+		if (sp == 0)
+			sp = SPEED_10;
+		else if (sp == 1)
+			sp = SPEED_100;
+		else
+			sp = SPEED_1000;
+
+		if (fc && dplx == DUPLEX_FULL) {
+			err = mdio_read(cphy, 0, MII_LPA, &lpa);
+			if (!err)
+				err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
+			if (err)
+				return err;
+
+			if (lpa & adv & ADVERTISE_PAUSE_CAP)
+				pause = PAUSE_RX | PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+				 (lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_ASYM))
+				pause = PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_CAP))
+				pause = PAUSE_RX;
+		}
+	}
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+static int vsc8211_power_down(struct cphy *cphy, int enable)
+{
+	return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+				   enable ? BMCR_PDOWN : 0);
+}
+
+static int vsc8211_intr_handler(struct cphy *cphy)
+{
+	unsigned int cause;
+	int err, cphy_cause = 0;
+
+	err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
+	if (err)
+		return err;
+
+	cause &= INTR_MASK;
+	if (cause & CFG_CHG_INTR_MASK)
+		cphy_cause |= cphy_cause_link_change;
+	if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+		cphy_cause |= cphy_cause_fifo_error;
+	return cphy_cause;
+}
+
+static struct cphy_ops vsc8211_ops = {
+	.reset = vsc8211_reset,
+	.intr_enable = vsc8211_intr_enable,
+	.intr_disable = vsc8211_intr_disable,
+	.intr_clear = vsc8211_intr_clear,
+	.intr_handler = vsc8211_intr_handler,
+	.autoneg_enable = vsc8211_autoneg_enable,
+	.autoneg_restart = vsc8211_autoneg_restart,
+	.advertise = t3_phy_advertise,
+	.set_speed_duplex = t3_set_phy_speed_duplex,
+	.get_link_status = vsc8211_get_link_status,
+	.power_down = vsc8211_power_down,
+};
+
+void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+			 int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
+}
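vsc8211_get_link_status() resolves flow control from the local advertisement (adv) and the link-partner ability word (lpa) following the IEEE 802.3 autoneg pause rules. The same truth table as a standalone helper, assuming only the mii.h bits and cxgb3 PAUSE_* flags already used above:

/* Sketch, not part of the patch: pause resolution after autoneg,
 * mirroring the branch in vsc8211_get_link_status(). */
static int resolve_pause(unsigned int adv, unsigned int lpa)
{
	if (lpa & adv & ADVERTISE_PAUSE_CAP)	/* both symmetric */
		return PAUSE_RX | PAUSE_TX;
	if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
	    (adv & ADVERTISE_PAUSE_ASYM))
		return PAUSE_TX;		/* asymmetric, Tx pause only */
	if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
		return PAUSE_RX;		/* asymmetric, Rx pause only */
	return 0;				/* no pause in either direction */
}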

+ 409 - 0
drivers/net/cxgb3/xgmac.c

@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+/*
+ * # of exact address filters.  The first one is used for the station address,
+ * the rest are available for multicast addresses.
+ */
+#define EXACT_ADDR_FILTERS 8
+
+static inline int macidx(const struct cmac *mac)
+{
+	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
+}
+
+static void xaui_serdes_reset(struct cmac *mac)
+{
+	static const unsigned int clear[] = {
+		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+	};
+
+	int i;
+	struct adapter *adap = mac->adapter;
+	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
+
+	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
+		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
+		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
+		     F_RESETPLL23 | F_RESETPLL01);
+	t3_read_reg(adap, ctrl);
+	udelay(15);
+
+	for (i = 0; i < ARRAY_SIZE(clear); i++) {
+		t3_set_reg_field(adap, ctrl, clear[i], 0);
+		udelay(15);
+	}
+}
+
+void t3b_pcs_reset(struct cmac *mac)
+{
+	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+			 F_PCS_RESET_, 0);
+	udelay(20);
+	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
+			 F_PCS_RESET_);
+}
+
+int t3_mac_reset(struct cmac *mac)
+{
+	static const struct addr_val_pair mac_reset_avp[] = {
+		{A_XGM_TX_CTRL, 0},
+		{A_XGM_RX_CTRL, 0},
+		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
+		{A_XGM_RX_HASH_LOW, 0},
+		{A_XGM_RX_HASH_HIGH, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
+		{A_XGM_STAT_CTRL, F_CLRSTATS}
+	};
+	u32 val;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+
+	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
+	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
+			 F_RXSTRFRWRD | F_DISERRFRAMES,
+			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+
+	if (uses_xaui(adap)) {
+		if (adap->params.rev == 0) {
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_RXENABLE | F_TXENABLE);
+			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+					    F_CMULOCK, 1, 5, 2)) {
+				CH_ERR(adap,
+				       "MAC %d XAUI SERDES CMU lock failed\n",
+				       macidx(mac));
+				return -1;
+			}
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_SERDESRESET_);
+		} else
+			xaui_serdes_reset(mac);
+	}
+
+	if (adap->params.rev > 0)
+		t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
+
+	val = F_MAC_RESET_;
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+
+	memset(&mac->stats, 0, sizeof(mac->stats));
+	return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
+{
+	u32 addr_lo, addr_hi;
+	unsigned int oft = mac->offset + idx * 8;
+
+	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	addr_hi = (addr[5] << 8) | addr[4];
+
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+{
+	if (idx >= mac->nucast)
+		return -EINVAL;
+	set_addr_filter(mac, idx, addr);
+	return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses.  Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+	if (n > EXACT_ADDR_FILTERS)
+		return -EINVAL;
+	mac->nucast = n;
+	return 0;
+}
+
+/* Calculate the RX hash filter index of an Ethernet address */
+static int hash_hw_addr(const u8 *addr)
+{
+	int hash = 0, octet, bit, i = 0, c;
+
+	for (octet = 0; octet < 6; ++octet)
+		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+			hash ^= (c & 1) << i;
+			if (++i == 6)
+				i = 0;
+		}
+	return hash;
+}
+
+int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
+{
+	u32 val, hash_lo, hash_hi;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+	if (rm->dev->flags & IFF_PROMISC)
+		val |= F_COPYALLFRAMES;
+	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+	if (rm->dev->flags & IFF_ALLMULTI)
+		hash_lo = hash_hi = 0xffffffff;
+	else {
+		u8 *addr;
+		int exact_addr_idx = mac->nucast;
+
+		hash_lo = hash_hi = 0;
+		while ((addr = t3_get_next_mcaddr(rm)))
+			if (exact_addr_idx < EXACT_ADDR_FILTERS)
+				set_addr_filter(mac, exact_addr_idx++, addr);
+			else {
+				int hash = hash_hw_addr(addr);
+
+				if (hash < 32)
+					hash_lo |= (1 << hash);
+				else
+					hash_hi |= (1 << (hash - 32));
+			}
+	}
+
+	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+	return 0;
+}
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+	int hwm, lwm;
+	unsigned int thres, v;
+	struct adapter *adap = mac->adapter;
+
+	/*
+	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
+	 * packet size register includes header, but not FCS.
+	 */
+	mtu += 14;
+	if (mtu > MAX_FRAME_SIZE - 4)
+		return -EINVAL;
+	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+	/*
+	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
+	 * HWM only if flow-control is enabled.
+	 */
+	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
+	hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
+	lwm = hwm - 1024;
+	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
+	v |= V_RXFIFOPAUSELWM(lwm / 8);
+	if (G_RXFIFOPAUSEHWM(v))
+		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
+		    V_RXFIFOPAUSEHWM(hwm / 8);
+	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+
+	/* Adjust the TX FIFO threshold based on the MTU */
+	thres = (adap->params.vpd.cclk * 1000) / 15625;
+	thres = (thres * mtu) / 1000;
+	if (is_10G(adap))
+		thres /= 10;
+	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
+	thres = max(thres, 8U);	/* need at least 8 */
+	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
+			 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
+	return 0;
+}
+
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
+{
+	u32 val;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -EINVAL;
+	if (speed >= 0) {
+		if (speed == SPEED_10)
+			val = V_PORTSPEED(0);
+		else if (speed == SPEED_100)
+			val = V_PORTSPEED(1);
+		else if (speed == SPEED_1000)
+			val = V_PORTSPEED(2);
+		else if (speed == SPEED_10000)
+			val = V_PORTSPEED(3);
+		else
+			return -EINVAL;
+
+		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
+				 V_PORTSPEED(M_PORTSPEED), val);
+	}
+
+	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+	if (fc & PAUSE_TX)
+		val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128);	/* +1KB */
+	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+	return 0;
+}
+
+int t3_mac_enable(struct cmac *mac, int which)
+{
+	int idx = macidx(mac);
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	if (which & MAC_DIRECTION_TX) {
+		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+		t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+	}
+	if (which & MAC_DIRECTION_RX)
+		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+	return 0;
+}
+
+int t3_mac_disable(struct cmac *mac, int which)
+{
+	int idx = macidx(mac);
+	struct adapter *adap = mac->adapter;
+
+	if (which & MAC_DIRECTION_TX) {
+		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+		t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
+	}
+	if (which & MAC_DIRECTION_RX)
+		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+	return 0;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the packet counters are only
+ * 32 bits they can overflow in ~286 secs at 10G, so the function should be
+ * called more frequently than that.  The byte counters are 45 bits wide and
+ * would overflow in ~7.8 hours.
+ */
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
+{
+#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
+#define RMON_UPDATE(mac, name, reg) \
+	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
+#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
+	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
+			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
+
+	u32 v, lo;
+
+	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
+	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
+	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
+	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
+	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
+	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
+	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
+	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
+	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
+
+	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
+	mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+
+	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+
+	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
+	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
+	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
+	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
+	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
+	/* This counts error frames in general (bad FCS, underrun, etc). */
+	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
+
+	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+
+	/* The next stat isn't clear-on-read. */
+	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
+	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+	lo = (u32) mac->stats.rx_cong_drops;
+	mac->stats.rx_cong_drops += (u64) (v - lo);
+
+	return &mac->stats;
+}
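t3_mac_update_stats() ends with a subtler idiom than the RMON macros: the TP MIB congestion-drop counter is a free-running 32-bit value, so the driver keeps a 64-bit accumulator and adds the modulo-2^32 delta between the fresh hardware value and the low 32 bits it saw last time. That stays exact across counter wrap as long as the function runs more often than one wrap period (the ~286 s bound in the comment above). The idiom in isolation, with illustrative names:

#include <linux/types.h>

/* Sketch, not part of the patch: fold a free-running 32-bit hardware
 * counter into a 64-bit software accumulator. */
static void accumulate32(u64 *acc, u32 hw_now)
{
	u32 last = (u32)*acc;		/* low 32 bits seen last time */

	*acc += (u32)(hw_now - last);	/* unsigned delta is exact mod 2^32 */
}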

+ 117 - 47
drivers/net/declance.c

@@ -5,7 +5,7 @@
  *
  *      adopted from sunlance.c by Richard van den Berg
  *
- *      Copyright (C) 2002, 2003, 2005  Maciej W. Rozycki
+ *      Copyright (C) 2002, 2003, 2005, 2006  Maciej W. Rozycki
  *
  *      additional sources:
  *      - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -44,6 +44,8 @@
  *      v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
  *              PMAX requirement to only use halfword accesses to the
  *              buffer. macro
+ *
+ *      v0.011: Converted the PMAD to the driver model. macro
  */

 #include <linux/crc32.h>
@@ -58,6 +60,7 @@
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/tc.h>
 #include <linux/types.h>

 #include <asm/addrspace.h>
@@ -69,15 +72,16 @@
 #include <asm/dec/kn01.h>
 #include <asm/dec/machtype.h>
 #include <asm/dec/system.h>
-#include <asm/dec/tc.h>

 static char version[] __devinitdata =
-"declance.c: v0.010 by Linux MIPS DECstation task force\n";
+"declance.c: v0.011 by Linux MIPS DECstation task force\n";

 MODULE_AUTHOR("Linux MIPS DECstation task force");
 MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
 MODULE_LICENSE("GPL");

+#define __unused __attribute__ ((unused))
+
 /*
  * card types
  */
@@ -246,7 +250,6 @@ struct lance_init_block {
 struct lance_private {
 	struct net_device *next;
 	int type;
-	int slot;
 	int dma_irq;
 	volatile struct lance_regs *ll;

@@ -288,6 +291,7 @@ struct lance_regs {

 int dec_lance_debug = 2;

+static struct tc_driver dec_lance_tc_driver;
 static struct net_device *root_lance_dev;

 static inline void writereg(volatile unsigned short *regptr, short value)
@@ -1023,7 +1027,7 @@ static void lance_set_multicast_retry(unsigned long _opaque)
 	lance_set_multicast(dev);
 }

-static int __init dec_lance_init(const int type, const int slot)
+static int __init dec_lance_probe(struct device *bdev, const int type)
 {
 	static unsigned version_printed;
 	static const char fmt[] = "declance%d";
@@ -1031,6 +1035,7 @@ static int __init dec_lance_init(const int type, const int slot)
 	struct net_device *dev;
 	struct lance_private *lp;
 	volatile struct lance_regs *ll;
+	resource_size_t start = 0, len = 0;
 	int i, ret;
 	unsigned long esar_base;
 	unsigned char *esar;
@@ -1038,14 +1043,18 @@ static int __init dec_lance_init(const int type, const int slot)
 	if (dec_lance_debug && version_printed++ == 0)
 		printk(version);

-	i = 0;
-	dev = root_lance_dev;
-	while (dev) {
-		i++;
-		lp = (struct lance_private *)dev->priv;
-		dev = lp->next;
+	if (bdev)
+		snprintf(name, sizeof(name), "%s", bdev->bus_id);
+	else {
+		i = 0;
+		dev = root_lance_dev;
+		while (dev) {
+			i++;
+			lp = (struct lance_private *)dev->priv;
+			dev = lp->next;
+		}
+		snprintf(name, sizeof(name), fmt, i);
 	}
-	snprintf(name, sizeof(name), fmt, i);

 	dev = alloc_etherdev(sizeof(struct lance_private));
 	if (!dev) {
@@ -1063,7 +1072,6 @@ static int __init dec_lance_init(const int type, const int slot)
 	spin_lock_init(&lp->lock);

 	lp->type = type;
-	lp->slot = slot;
 	switch (type) {
 	case ASIC_LANCE:
 		dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
@@ -1110,12 +1118,22 @@ static int __init dec_lance_init(const int type, const int slot)
 		break;
 #ifdef CONFIG_TC
 	case PMAD_LANCE:
-		claim_tc_card(slot);
+		dev_set_drvdata(bdev, dev);
+
+		start = to_tc_dev(bdev)->resource.start;
+		len = to_tc_dev(bdev)->resource.end - start + 1;
+		if (!request_mem_region(start, len, bdev->bus_id)) {
+			printk(KERN_ERR
+			       "%s: Unable to reserve MMIO resource\n",
+			       bdev->bus_id);
+			ret = -EBUSY;
+			goto err_out_dev;
+		}

-		dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
+		dev->mem_start = CKSEG1ADDR(start);
 		dev->mem_end = dev->mem_start + 0x100000;
 		dev->base_addr = dev->mem_start + 0x100000;
-		dev->irq = get_tc_irq_nr(slot);
+		dev->irq = to_tc_dev(bdev)->interrupt;
 		esar_base = dev->mem_start + 0x1c0002;
 		lp->dma_irq = -1;

@@ -1174,7 +1192,7 @@ static int __init dec_lance_init(const int type, const int slot)
 		printk(KERN_ERR "%s: declance_init called with unknown type\n",
 			name);
 		ret = -ENODEV;
-		goto err_out_free_dev;
+		goto err_out_dev;
 	}

 	ll = (struct lance_regs *) dev->base_addr;
@@ -1188,7 +1206,7 @@ static int __init dec_lance_init(const int type, const int slot)
 			"%s: Ethernet station address prom not found!\n",
 			name);
 		ret = -ENODEV;
-		goto err_out_free_dev;
+		goto err_out_resource;
 	}
 	/* Check the prom contents */
 	for (i = 0; i < 8; i++) {
@@ -1198,7 +1216,7 @@ static int __init dec_lance_init(const int type, const int slot)
 			printk(KERN_ERR "%s: Something is wrong with the "
 				"ethernet station address prom!\n", name);
 			ret = -ENODEV;
-			goto err_out_free_dev;
+			goto err_out_resource;
 		}
 	}

@@ -1255,48 +1273,51 @@ static int __init dec_lance_init(const int type, const int slot)
 	if (ret) {
 		printk(KERN_ERR
 			"%s: Unable to register netdev, aborting.\n", name);
-		goto err_out_free_dev;
+		goto err_out_resource;
 	}

-	lp->next = root_lance_dev;
-	root_lance_dev = dev;
+	if (!bdev) {
+		lp->next = root_lance_dev;
+		root_lance_dev = dev;
+	}

 	printk("%s: registered as %s.\n", name, dev->name);
 	return 0;

-err_out_free_dev:
+err_out_resource:
+	if (bdev)
+		release_mem_region(start, len);
+
+err_out_dev:
 	free_netdev(dev);

 err_out:
 	return ret;
 }

+static void __exit dec_lance_remove(struct device *bdev)
+{
+	struct net_device *dev = dev_get_drvdata(bdev);
+	resource_size_t start, len;
+
+	unregister_netdev(dev);
+	start = to_tc_dev(bdev)->resource.start;
+	len = to_tc_dev(bdev)->resource.end - start + 1;
+	release_mem_region(start, len);
+	free_netdev(dev);
+}

 /* Find all the lance cards on the system and initialize them */
-static int __init dec_lance_probe(void)
+static int __init dec_lance_platform_probe(void)
 {
 	int count = 0;

-	/* Scan slots for PMAD-AA cards first. */
-#ifdef CONFIG_TC
-	if (TURBOCHANNEL) {
-		int slot;
-
-		while ((slot = search_tc_card("PMAD-AA")) >= 0) {
-			if (dec_lance_init(PMAD_LANCE, slot) < 0)
-				break;
-			count++;
-		}
-	}
-#endif
-
-	/* Then handle onboard devices. */
 	if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
 		if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
-			if (dec_lance_init(ASIC_LANCE, -1) >= 0)
+			if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
 				count++;
 		} else if (!TURBOCHANNEL) {
-			if (dec_lance_init(PMAX_LANCE, -1) >= 0)
+			if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
 				count++;
 		}
 	}
@@ -1304,21 +1325,70 @@ static int __init dec_lance_probe(void)
 	return (count > 0) ? 0 : -ENODEV;
 }

-static void __exit dec_lance_cleanup(void)
+static void __exit dec_lance_platform_remove(void)
 {
 	while (root_lance_dev) {
 		struct net_device *dev = root_lance_dev;
 		struct lance_private *lp = netdev_priv(dev);

 		unregister_netdev(dev);
-#ifdef CONFIG_TC
-		if (lp->slot >= 0)
-			release_tc_card(lp->slot);
-#endif
 		root_lance_dev = lp->next;
 		free_netdev(dev);
 	}
 }

-module_init(dec_lance_probe);
-module_exit(dec_lance_cleanup);
+#ifdef CONFIG_TC
+static int __init dec_lance_tc_probe(struct device *dev);
+static int __exit dec_lance_tc_remove(struct device *dev);
+
+static const struct tc_device_id dec_lance_tc_table[] = {
+	{ "DEC     ", "PMAD-AA " },
+	{ }
+};
+MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
+
+static struct tc_driver dec_lance_tc_driver = {
+	.id_table	= dec_lance_tc_table,
+	.driver		= {
+		.name	= "declance",
+		.bus	= &tc_bus_type,
+		.probe	= dec_lance_tc_probe,
+		.remove	= __exit_p(dec_lance_tc_remove),
+	},
+};
+
+static int __init dec_lance_tc_probe(struct device *dev)
+{
+	int status = dec_lance_probe(dev, PMAD_LANCE);
+	if (!status)
+		get_device(dev);
+	return status;
+}
+
+static int __exit dec_lance_tc_remove(struct device *dev)
+{
+	put_device(dev);
+	dec_lance_remove(dev);
+	return 0;
+}
+#endif
+
+static int __init dec_lance_init(void)
+{
+	int status;
+
+	status = tc_register_driver(&dec_lance_tc_driver);
+	if (!status)
+		dec_lance_platform_probe();
+	return status;
+}
+
+static void __exit dec_lance_exit(void)
+{
+	dec_lance_platform_remove();
+	tc_unregister_driver(&dec_lance_tc_driver);
+}
+
+
+module_init(dec_lance_init);
+module_exit(dec_lance_exit);
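The conversion above leaves dec_lance_probe() and dec_lance_remove() strictly symmetric: everything claimed while probing a TURBOchannel board (the MMIO range, the netdev, the device reference) is released in reverse order on removal. The skeleton of that discipline, under the same 2.6.20-era TC API the patch uses (function names are illustrative):

/* Sketch, not part of the patch: claim/release pairing for a
 * TURBOchannel device. */
static int example_tc_probe(struct device *bdev)
{
	resource_size_t start = to_tc_dev(bdev)->resource.start;
	resource_size_t len = to_tc_dev(bdev)->resource.end - start + 1;

	if (!request_mem_region(start, len, bdev->bus_id))
		return -EBUSY;	/* region already owned by someone else */
	/* ... allocate netdev, map registers, dev_set_drvdata() ... */
	return 0;
}

static void example_tc_remove(struct device *bdev)
{
	resource_size_t start = to_tc_dev(bdev)->resource.start;
	resource_size_t len = to_tc_dev(bdev)->resource.end - start + 1;

	/* ... unregister and free the netdev ... */
	release_mem_region(start, len);	/* undo probe in reverse order */
}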

+ 0 - 7
drivers/net/e1000/e1000.h

@@ -59,17 +59,13 @@
 #include <linux/capability.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#ifdef NETIF_F_TSO6
 #include <linux/ipv6.h>
-#endif
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <net/pkt_sched.h>
 #include <linux/list.h>
 #include <linux/reboot.h>
-#ifdef NETIF_F_TSO
 #include <net/checksum.h>
-#endif
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
@@ -257,7 +253,6 @@ struct e1000_adapter {
 	spinlock_t tx_queue_lock;
 #endif
 	atomic_t irq_sem;
-	unsigned int detect_link;
 	unsigned int total_tx_bytes;
 	unsigned int total_tx_packets;
 	unsigned int total_rx_bytes;
@@ -348,9 +343,7 @@ struct e1000_adapter {
 	boolean_t have_msi;
 #endif
 	/* to not mess up cache alignment, always add to the bottom */
-#ifdef NETIF_F_TSO
 	boolean_t tso_force;
-#endif
 	boolean_t smart_power_down;	/* phy smart power down */
 	boolean_t quad_port_a;
 	unsigned long flags;

+ 0 - 6
drivers/net/e1000/e1000_ethtool.c

@@ -338,7 +338,6 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 }

-#ifdef NETIF_F_TSO
 static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
@@ -352,18 +351,15 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	else
 		netdev->features &= ~NETIF_F_TSO;

-#ifdef NETIF_F_TSO6
 	if (data)
 		netdev->features |= NETIF_F_TSO6;
 	else
 		netdev->features &= ~NETIF_F_TSO6;
-#endif

 	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
 	adapter->tso_force = TRUE;
 	return 0;
 }
-#endif /* NETIF_F_TSO */

 static uint32_t
 e1000_get_msglevel(struct net_device *netdev)
@@ -1971,10 +1967,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 	.set_tx_csum            = e1000_set_tx_csum,
 	.get_sg                 = ethtool_op_get_sg,
 	.set_sg                 = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 	.get_tso                = ethtool_op_get_tso,
 	.set_tso                = e1000_set_tso,
-#endif
 	.self_test_count        = e1000_diag_test_count,
 	.self_test              = e1000_diag_test,
 	.get_strings            = e1000_get_strings,

+ 47 - 81
drivers/net/e1000/e1000_main.c

@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
+#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

@@ -990,16 +990,12 @@ e1000_probe(struct pci_dev *pdev,
 			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
 	}

-#ifdef NETIF_F_TSO
 	if ((adapter->hw.mac_type >= e1000_82544) &&
 	   (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;

-#ifdef NETIF_F_TSO6
 	if (adapter->hw.mac_type > e1000_82547_rev_2)
 		netdev->features |= NETIF_F_TSO6;
-#endif
-#endif
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;

@@ -2583,15 +2579,22 @@ e1000_watchdog(unsigned long data)

 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
+			uint32_t ctrl;
 			boolean_t txb2b = 1;
 			e1000_get_speed_and_duplex(&adapter->hw,
 			                           &adapter->link_speed,
 			                           &adapter->link_duplex);

-			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
-			       adapter->link_speed,
-			       adapter->link_duplex == FULL_DUPLEX ?
-			       "Full Duplex" : "Half Duplex");
+			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+			        "Flow Control: %s\n",
+			        adapter->link_speed,
+			        adapter->link_duplex == FULL_DUPLEX ?
+			        "Full Duplex" : "Half Duplex",
+			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+			        E1000_CTRL_TFCE) ? "TX" : "None" )));

 			/* tweak tx_queue_len according to speed/duplex
 			 * and adjust the timeout factor */
@@ -2619,7 +2622,6 @@ e1000_watchdog(unsigned long data)
 				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
 			}

-#ifdef NETIF_F_TSO
 			/* disable TSO for pcie and 10/100 speeds, to avoid
 			 * some hardware issues */
 			if (!adapter->tso_force &&
@@ -2630,22 +2632,17 @@ e1000_watchdog(unsigned long data)
 					DPRINTK(PROBE,INFO,
 				        "10/100 speed: disabling TSO\n");
 					netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 					netdev->features &= ~NETIF_F_TSO6;
-#endif
 					break;
 				case SPEED_1000:
 					netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 					netdev->features |= NETIF_F_TSO6;
-#endif
 					break;
 				default:
 					/* oops */
 					break;
 				}
 			}
-#endif

 			/* enable transmits in the hardware, need to do this
 			 * after setting TARC0 */
@@ -2875,7 +2872,6 @@ static int
 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
           struct sk_buff *skb)
 {
-#ifdef NETIF_F_TSO
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
@@ -2904,7 +2900,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 						   0);
 			cmd_length = E1000_TXD_CMD_IP;
 			ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO6
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 			skb->nh.ipv6h->payload_len = 0;
 			skb->h.th->check =
@@ -2914,7 +2909,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 						 IPPROTO_TCP,
 						 0);
 			ipcse = 0;
-#endif
 		}
 		ipcss = skb->nh.raw - skb->data;
 		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
@@ -2947,8 +2941,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,

 		return TRUE;
 	}
-#endif
-
 	return FALSE;
 }

@@ -2968,8 +2960,9 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		buffer_info = &tx_ring->buffer_info[i];
 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

+		context_desc->lower_setup.ip_config = 0;
 		context_desc->upper_setup.tcp_fields.tucss = css;
-		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
+		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
 		context_desc->upper_setup.tcp_fields.tucse = 0;
 		context_desc->tcp_seg_setup.data = 0;
 		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3005,7 +2998,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, max_per_txd);
-#ifdef NETIF_F_TSO
 		/* Workaround for Controller erratum --
 		 * descriptor for non-tso packet in a linear SKB that follows a
 		 * tso gets written back prematurely before the data is fully
@@ -3020,7 +3012,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		 * in TSO mode.  Append 4-byte sentinel desc */
 		if (unlikely(mss && !nr_frags && size == len && size > 8))
 			size -= 4;
-#endif
 		/* work-around for errata 10 and it applies
 		 * to all controllers in PCI-X mode
 		 * The fix is to make sure that the first descriptor of a
@@ -3062,12 +3053,10 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		while (len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, max_per_txd);
-#ifdef NETIF_F_TSO
 			/* Workaround for premature desc write-backs
 			 * in TSO mode.  Append 4-byte sentinel desc */
 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
 				size -= 4;
-#endif
 			/* Workaround for potential 82544 hang in PCI-X.
 			 * Avoid terminating buffers within evenly-aligned
 			 * dwords. */
@@ -3292,7 +3281,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (adapter->hw.mac_type >= e1000_82571)
 		max_per_txd = 8192;

-#ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->gso_size;
 	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
@@ -3346,16 +3334,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
 		count++;
 	count++;
-#else
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		count++;
-#endif

-#ifdef NETIF_F_TSO
 	/* Controller Erratum workaround */
 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
 		count++;
-#endif

 	count += TXD_USE_COUNT(len, max_txd_pwr);

@@ -3765,8 +3747,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
  * @data: pointer to a network interface device structure
  **/

-static
-irqreturn_t e1000_intr_msi(int irq, void *data)
+static irqreturn_t
+e1000_intr_msi(int irq, void *data)
 {
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3774,49 +3756,27 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
 #ifndef CONFIG_E1000_NAPI
 	int i;
 #endif
+	uint32_t icr = E1000_READ_REG(hw, ICR);

-	/* this code avoids the read of ICR but has to get 1000 interrupts
-	 * at every link change event before it will notice the change */
-	if (++adapter->detect_link >= 1000) {
-		uint32_t icr = E1000_READ_REG(hw, ICR);
 #ifdef CONFIG_E1000_NAPI
-		/* read ICR disables interrupts using IAM, so keep up with our
-		 * enable/disable accounting */
-		atomic_inc(&adapter->irq_sem);
+	/* read ICR disables interrupts using IAM, so keep up with our
+	 * enable/disable accounting */
+	atomic_inc(&adapter->irq_sem);
 #endif
-		adapter->detect_link = 0;
-		if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
-		    (icr & E1000_ICR_INT_ASSERTED)) {
-			hw->get_link_status = 1;
-			/* 80003ES2LAN workaround--
-			* For packet buffer work-around on link down event;
-			* disable receives here in the ISR and
-			* reset adapter in watchdog
-			*/
-			if (netif_carrier_ok(netdev) &&
-			    (adapter->hw.mac_type == e1000_80003es2lan)) {
-				/* disable receives */
-				uint32_t rctl = E1000_READ_REG(hw, RCTL);
-				E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
-			}
-			/* guard against interrupt when we're going down */
-			if (!test_bit(__E1000_DOWN, &adapter->flags))
-				mod_timer(&adapter->watchdog_timer,
-				          jiffies + 1);
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog */
+		if (netif_carrier_ok(netdev) &&
+		    (adapter->hw.mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			uint32_t rctl = E1000_READ_REG(hw, RCTL);
+			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 		}
-	} else {
-		E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
-		                                         E1000_ICR_LSC)));
-		/* bummer we have to flush here, but things break otherwise as
-		 * some event appears to be lost or delayed and throughput
-		 * drops.  In almost all tests this flush is un-necessary */
-		E1000_WRITE_FLUSH(hw);
-#ifdef CONFIG_E1000_NAPI
-		/* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
-		 * masked.  No need for the IMC write, but it does mean we
-		 * should account for it ASAP. */
-		atomic_inc(&adapter->irq_sem);
-#endif
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}

 #ifdef CONFIG_E1000_NAPI
@@ -3836,7 +3796,7 @@ irqreturn_t e1000_intr_msi(int irq, void *data)

 	for (i = 0; i < E1000_MAX_INTR; i++)
 		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
-		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+		   e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;

 	if (likely(adapter->itr_setting & 3))
@@ -3939,7 +3899,7 @@ e1000_intr(int irq, void *data)

 	for (i = 0; i < E1000_MAX_INTR; i++)
 		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
-		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+		   e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;

 	if (likely(adapter->itr_setting & 3))
@@ -3989,7 +3949,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 	poll_dev->quota -= work_done;

 	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if ((!tx_cleaned && (work_done == 0)) ||
+	if ((tx_cleaned && (work_done < work_to_do)) ||
 	   !netif_running(poll_dev)) {
 quit_polling:
 		if (likely(adapter->itr_setting & 3))
@@ -4019,7 +3979,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
 	unsigned int count = 0;
 #endif
-	boolean_t cleaned = FALSE;
+	boolean_t cleaned = TRUE;
 	unsigned int total_tx_bytes=0, total_tx_packets=0;

 	i = tx_ring->next_to_clean;
@@ -4034,10 +3994,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,

 			if (cleaned) {
 				struct sk_buff *skb = buffer_info->skb;
-				unsigned int segs = skb_shinfo(skb)->gso_segs;
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+				            skb->len;
 				total_tx_packets += segs;
-				total_tx_packets++;
-				total_tx_bytes += skb->len;
+				total_tx_bytes += bytecount;
 			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			tx_desc->upper.data = 0;
@@ -4050,7 +4013,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
 #define E1000_TX_WEIGHT 64
 		/* weight of a sort for tx, to avoid endless transmit cleanup */
-		if (count++ == E1000_TX_WEIGHT) break;
+		if (count++ == E1000_TX_WEIGHT) {
+			cleaned = FALSE;
+			break;
+		}
 #endif
 	}


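The bytecount arithmetic added to e1000_clean_tx_irq() charges replicated TSO headers to the byte counters: the linear header area goes out once per segment but is counted in skb->len only once. A worked instance, assuming a hypothetical 3-segment TSO packet whose linear area holds exactly the 66-byte headers:

/* Sketch, not part of the patch.  With
 *   skb->len         = 4410  (66-byte headers + 4344 payload)
 *   skb_headlen(skb) = 66
 *   gso_segs         = 3
 * the wire carries 3 * 66 + 4344 = 4542 bytes, and
 *   bytecount = (3 - 1) * 66 + 4410 = 4542
 * agrees; for a non-TSO skb, gso_segs ?: 1 evaluates to 1 and
 * bytecount degenerates to skb->len. */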
+ 1 - 3
drivers/net/e1000/e1000_osdep.h

@@ -48,8 +48,6 @@ typedef enum {
     TRUE = 1
 } boolean_t;

-#define MSGOUT(S, A, B)	printk(KERN_DEBUG S "\n", A, B)
-
 #ifdef DBG
 #define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
 #define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
@@ -58,7 +56,7 @@ typedef enum {
 #define DEBUGOUT1(S, A...)
 #endif

-#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
 #define DEBUGOUT2 DEBUGOUT1
 #define DEBUGOUT3 DEBUGOUT2
 #define DEBUGOUT7 DEBUGOUT3

+ 3 - 12
drivers/net/e1000/e1000_param.c

@@ -760,22 +760,13 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 	case SPEED_1000:
 		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
 			"Duplex\n");
-		DPRINTK(PROBE, INFO,
-			"Using Autonegotiation at 1000 Mbps "
-			"Full Duplex only\n");
-		adapter->hw.autoneg = adapter->fc_autoneg = 1;
-		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
-		break;
+		goto full_duplex_only;
 	case SPEED_1000 + HALF_DUPLEX:
 		DPRINTK(PROBE, INFO,
 			"Half Duplex is not supported at 1000 Mbps\n");
-		DPRINTK(PROBE, INFO,
-			"Using Autonegotiation at 1000 Mbps "
-			"Full Duplex only\n");
-		adapter->hw.autoneg = adapter->fc_autoneg = 1;
-		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
-		break;
+		/* fall through */
 	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
 		DPRINTK(PROBE, INFO,
 		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
 		adapter->hw.autoneg = adapter->fc_autoneg = 1;

File diff suppressed because it is too large
+ 479 - 190
drivers/net/forcedeth.c


+ 1 - 1
drivers/net/hp100.c

@@ -3034,7 +3034,7 @@ static int __init hp100_module_init(void)
 		goto out2;
 #endif
 #ifdef CONFIG_PCI
-	err = pci_module_init(&hp100_pci_driver);
+	err = pci_register_driver(&hp100_pci_driver);
 	if (err && err != -ENODEV)
 		goto out3;
 #endif

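pci_module_init() was the legacy registration wrapper that this tree-wide cleanup replaces; pci_register_driver() returns 0 once the driver is registered, or a negative errno, independent of how many devices bind, which is why the surrounding code keeps its explicit -ENODEV check. A minimal registration pairing under that API (the driver struct contents here are illustrative):

/* Sketch, not part of the patch. */
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	.name = "example",
	/* .id_table, .probe and .remove filled in by a real driver */
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);	/* 0 or -errno */
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}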
+ 0 - 2
drivers/net/ixgb/ixgb.h

@@ -61,9 +61,7 @@
 #include <net/pkt_sched.h>
 #include <linux/list.h>
 #include <linux/reboot.h>
-#ifdef NETIF_F_TSO
 #include <net/checksum.h>
-#endif

 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>

+ 0 - 6
drivers/net/ixgb/ixgb_ethtool.c

@@ -82,10 +82,8 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
 	{"tx_restart_queue", IXGB_STAT(restart_queue) },
 	{"tx_restart_queue", IXGB_STAT(restart_queue) },
 	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
 	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
 	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
-#ifdef NETIF_F_TSO
 	{"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
 	{"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
 	{"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
 	{"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
-#endif
 	{"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
 	{"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
 	{"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
 	{"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
 	{"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
 	{"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
@@ -240,7 +238,6 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 	return 0;
 }
 }
 
 
-#ifdef NETIF_F_TSO
 static int
 static int
 ixgb_set_tso(struct net_device *netdev, uint32_t data)
 ixgb_set_tso(struct net_device *netdev, uint32_t data)
 {
 {
@@ -250,7 +247,6 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
 }
-#endif /* NETIF_F_TSO */

 static uint32_t
 ixgb_get_msglevel(struct net_device *netdev)
@@ -722,10 +718,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 	.set_sg	= ethtool_op_set_sg,
 	.get_msglevel = ixgb_get_msglevel,
 	.set_msglevel = ixgb_set_msglevel,
-#ifdef NETIF_F_TSO
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ixgb_set_tso,
-#endif
 	.get_strings = ixgb_get_strings,
 	.phys_id = ixgb_phys_id,
 	.get_stats_count = ixgb_get_stats_count,

+ 0 - 4
drivers/net/ixgb/ixgb_main.c

@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
 			   NETIF_F_HW_VLAN_TX |
 			   NETIF_F_HW_VLAN_RX |
 			   NETIF_F_HW_VLAN_FILTER;
-#ifdef NETIF_F_TSO
 	netdev->features |= NETIF_F_TSO;
-#endif
 #ifdef NETIF_F_LLTX
 	netdev->features |= NETIF_F_LLTX;
 #endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
 static int
 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
-#ifdef NETIF_F_TSO
 	struct ixgb_context_desc *context_desc;
 	unsigned int i;
 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)

 		return 1;
 	}
-#endif

 	return 0;
 }

+ 23 - 2
drivers/net/macb.c

@@ -1046,6 +1046,14 @@ static int __devinit macb_probe(struct platform_device *pdev)

 	spin_lock_init(&bp->lock);

+#if defined(CONFIG_ARCH_AT91)
+	bp->pclk = clk_get(&pdev->dev, "macb_clk");
+	if (IS_ERR(bp->pclk)) {
+		dev_err(&pdev->dev, "failed to get macb_clk\n");
+		goto err_out_free_dev;
+	}
+	clk_enable(bp->pclk);
+#else
 	bp->pclk = clk_get(&pdev->dev, "pclk");
 	if (IS_ERR(bp->pclk)) {
 		dev_err(&pdev->dev, "failed to get pclk\n");
@@ -1059,6 +1067,7 @@ static int __devinit macb_probe(struct platform_device *pdev)

 	clk_enable(bp->pclk);
 	clk_enable(bp->hclk);
+#endif

 	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
 	if (!bp->regs) {
@@ -1119,9 +1128,17 @@ static int __devinit macb_probe(struct platform_device *pdev)

 	pdata = pdev->dev.platform_data;
 	if (pdata && pdata->is_rmii)
+#if defined(CONFIG_ARCH_AT91)
+		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
+#else
 		macb_writel(bp, USRIO, 0);
+#endif
 	else
+#if defined(CONFIG_ARCH_AT91)
+		macb_writel(bp, USRIO, MACB_BIT(CLKEN));
+#else
 		macb_writel(bp, USRIO, MACB_BIT(MII));
+#endif

 	bp->tx_pending = DEF_TX_RING_PENDING;

@@ -1148,9 +1165,11 @@ err_out_free_irq:
 err_out_iounmap:
 	iounmap(bp->regs);
 err_out_disable_clocks:
+#ifndef CONFIG_ARCH_AT91
 	clk_disable(bp->hclk);
-	clk_disable(bp->pclk);
 	clk_put(bp->hclk);
+#endif
+	clk_disable(bp->pclk);
 err_out_put_pclk:
 	clk_put(bp->pclk);
 err_out_free_dev:
@@ -1173,9 +1192,11 @@ static int __devexit macb_remove(struct platform_device *pdev)
 		unregister_netdev(dev);
 		free_irq(dev->irq, dev);
 		iounmap(bp->regs);
+#ifndef CONFIG_ARCH_AT91
 		clk_disable(bp->hclk);
-		clk_disable(bp->pclk);
 		clk_put(bp->hclk);
+#endif
+		clk_disable(bp->pclk);
 		clk_put(bp->pclk);
 		free_netdev(dev);
 		platform_set_drvdata(pdev, NULL);
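
On AT91 the MACB has a single gated clock ("macb_clk") where AVR32 uses separate pclk/hclk, which is why the unwind paths above now skip the hclk steps under CONFIG_ARCH_AT91. A minimal sketch of the clk API pairing those paths preserve (get -> enable -> disable -> put; single-clock AT91 case assumed):

	struct clk *clk = clk_get(&pdev->dev, "macb_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	/* ... hardware in use ... */
	clk_disable(clk);
	clk_put(clk);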

+ 7 - 1
drivers/net/macb.h

@@ -200,7 +200,7 @@
 #define MACB_SOF_OFFSET				30
 #define MACB_SOF_SIZE				2

-/* Bitfields in USRIO */
+/* Bitfields in USRIO (AVR32) */
 #define MACB_MII_OFFSET				0
 #define MACB_MII_SIZE				1
 #define MACB_EAM_OFFSET				1
@@ -210,6 +210,12 @@
 #define MACB_TX_PAUSE_ZERO_OFFSET		3
 #define MACB_TX_PAUSE_ZERO_SIZE			1

+/* Bitfields in USRIO (AT91) */
+#define MACB_RMII_OFFSET			0
+#define MACB_RMII_SIZE				1
+#define MACB_CLKEN_OFFSET			1
+#define MACB_CLKEN_SIZE				1
+
 /* Bitfields in WOL */
 #define MACB_IP_OFFSET				0
 #define MACB_IP_SIZE				16
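
The new AT91 bitfields follow the header's OFFSET/SIZE convention and are consumed through a token-pasting bit helper, as in the macb.c hunk above. A standalone illustration (the MACB_BIT() definition here is modeled on the header's style and is an assumption, not quoted from it):

	#include <stdio.h>

	#define MACB_RMII_OFFSET	0
	#define MACB_CLKEN_OFFSET	1
	#define MACB_BIT(name)		(1 << MACB_##name##_OFFSET)

	int main(void)
	{
		/* the AT91 RMII branch of macb_probe() sets both bits: */
		printf("USRIO = 0x%x\n", MACB_BIT(RMII) | MACB_BIT(CLKEN)); /* 0x3 */
		return 0;
	}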

+ 2 - 14
drivers/net/mace.c

@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/crc32.h>
 #include <linux/spinlock.h>
+#include <linux/bitrev.h>
 #include <asm/prom.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
@@ -74,7 +75,6 @@ struct mace_data {
 #define PRIV_BYTES	(sizeof(struct mace_data) \
 	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

-static int bitrev(int);
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
@@ -96,18 +96,6 @@ static void __mace_set_address(struct net_device *dev, void *addr);
  */
 static unsigned char *dummy_buf;

-/* Bit-reverse one byte of an ethernet hardware address. */
-static inline int
-bitrev(int b)
-{
-    int d = 0, i;
-
-    for (i = 0; i < 8; ++i, b >>= 1)
-	d = (d << 1) | (b & 1);
-    return d;
-}
-
-
 static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
 {
 	struct device_node *mace = macio_get_of_node(mdev);
@@ -173,7 +161,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i

 	rev = addr[0] == 0 && addr[1] == 0xA0;
 	for (j = 0; j < 6; ++j) {
-		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
+		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
 	}
 	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
 			in_8(&mp->mace->chipid_lo);

+ 3 - 15
drivers/net/macmace.c

@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/crc32.h>
+#include <linux/bitrev.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/irq.h>
@@ -81,19 +82,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id);
 static irqreturn_t mace_dma_intr(int irq, void *dev_id);
 static void mace_tx_timeout(struct net_device *dev);

-/* Bit-reverse one byte of an ethernet hardware address. */
-
-static int bitrev(int b)
-{
-	int d = 0, i;
-
-	for (i = 0; i < 8; ++i, b >>= 1) {
-		d = (d << 1) | (b & 1);
-	}
-
-	return d;
-}
-
 /*
  * Load a receive DMA channel with a base address and ring length
  */
@@ -219,12 +207,12 @@ struct net_device *mace_probe(int unit)
 	addr = (void *)MACE_PROM;

 	for (j = 0; j < 6; ++j) {
-		u8 v=bitrev(addr[j<<4]);
+		u8 v = bitrev8(addr[j<<4]);
 		checksum ^= v;
 		dev->dev_addr[j] = v;
 	}
 	for (; j < 8; ++j) {
-		checksum ^= bitrev(addr[j<<4]);
+		checksum ^= bitrev8(addr[j<<4]);
 	}

 	if (checksum != 0xFF) {

+ 1 - 5
drivers/net/macsonic.c

@@ -121,16 +121,12 @@ enum macsonic_type {
  * For reversing the PROM address
  */

-static unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
-				    1, 9, 5, 13, 3, 11, 7, 15};
-
 static inline void bit_reverse_addr(unsigned char addr[6])
 {
 	int i;

 	for(i = 0; i < 6; i++)
-		addr[i] = ((nibbletab[addr[i] & 0xf] << 4) |
-			   nibbletab[(addr[i] >> 4) &0xf]);
+		addr[i] = bitrev8(addr[i]);
 }

 int __init macsonic_init(struct net_device* dev)
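
This hunk, like the mace.c and macmace.c hunks above, replaces a private byte-reversal helper with the shared bitrev8() from <linux/bitrev.h>. A userspace model of the transform (the kernel's bitrev8() is table-driven; the loop below matches the deleted helpers):

	#include <stdio.h>

	static unsigned char bitrev8(unsigned char b)
	{
		unsigned char d = 0;
		int i;

		for (i = 0; i < 8; i++, b >>= 1)
			d = (d << 1) | (b & 1);
		return d;
	}

	int main(void)
	{
		printf("bitrev8(0xA0) = 0x%02x\n", bitrev8(0xA0));	/* 0x05 */
		return 0;
	}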

+ 0 - 10
drivers/net/myri10ge/myri10ge.c

@@ -1412,10 +1412,8 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
 	.set_tx_csum = ethtool_op_set_tx_hw_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ethtool_op_set_tso,
-#endif
 	.get_strings = myri10ge_get_strings,
 	.get_stats_count = myri10ge_get_stats_count,
 	.get_ethtool_stats = myri10ge_get_ethtool_stats,
@@ -1975,13 +1973,11 @@ again:
 	mss = 0;
 	max_segments = MXGEFW_MAX_SEND_DESC;

-#ifdef NETIF_F_TSO
 	if (skb->len > (dev->mtu + ETH_HLEN)) {
 		mss = skb_shinfo(skb)->gso_size;
 		if (mss != 0)
 			max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
 	}
-#endif				/*NETIF_F_TSO */

 	if ((unlikely(avail < max_segments))) {
 		/* we are out of transmit resources */
@@ -2013,7 +2009,6 @@ again:

 	cum_len = 0;

-#ifdef NETIF_F_TSO
 	if (mss) {		/* TSO */
 		/* this removes any CKSUM flag from before */
 		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
@@ -2029,7 +2024,6 @@ again:
 		 * the checksum by parsing the header. */
 		pseudo_hdr_offset = mss;
 	} else
-#endif				/*NETIF_F_TSO */
 		/* Mark small packets, and pad out tiny packets */
 	if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
 		flags |= MXGEFW_FLAGS_SMALL;
@@ -2097,7 +2091,6 @@ again:
 				seglen = len;
 			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
 			cum_len_next = cum_len + seglen;
-#ifdef NETIF_F_TSO
 			if (mss) {	/* TSO */
 				(req - rdma_count)->rdma_count = rdma_count + 1;

@@ -2124,7 +2117,6 @@ again:
 					    (small * MXGEFW_FLAGS_SMALL);
 				}
 			}
-#endif				/* NETIF_F_TSO */
 			req->addr_high = high_swapped;
 			req->addr_low = htonl(low);
 			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
@@ -2161,14 +2153,12 @@ again:
 	}

 	(req - rdma_count)->rdma_count = rdma_count;
-#ifdef NETIF_F_TSO
 	if (mss)
 		do {
 			req--;
 			req->flags |= MXGEFW_FLAGS_TSO_LAST;
 		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
 					 MXGEFW_FLAGS_FIRST)));
-#endif
 	idx = ((count - 1) + tx->req) & tx->mask;
 	tx->info[idx].last = 1;
 	if (tx->wc_fifo == NULL)

+ 15 - 2
drivers/net/netxen/netxen_nic.h

@@ -63,11 +63,14 @@

 #include "netxen_nic_hw.h"

-#define NETXEN_NIC_BUILD_NO     "2"
 #define _NETXEN_NIC_LINUX_MAJOR 3
 #define _NETXEN_NIC_LINUX_MINOR 3
 #define _NETXEN_NIC_LINUX_SUBVERSION 3
-#define NETXEN_NIC_LINUX_VERSIONID  "3.3.3" "-" NETXEN_NIC_BUILD_NO
+#define NETXEN_NIC_LINUX_VERSIONID  "3.3.3"
+
+#define NUM_FLASH_SECTORS (64)
+#define FLASH_SECTOR_SIZE (64 * 1024)
+#define FLASH_TOTAL_SIZE  (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)

 #define RCV_DESC_RINGSIZE	\
 	(sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
@@ -85,6 +88,7 @@
 #define NETXEN_RCV_PRODUCER_OFFSET	0
 #define NETXEN_RCV_PEG_DB_ID		2
 #define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0

 #define ADDR_IN_WINDOW1(off)	\
 	((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
@@ -1028,6 +1032,15 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
 void netxen_load_firmware(struct netxen_adapter *adapter);
 int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
 int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+				u8 *bytes, size_t size);
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
+				u8 *bytes, size_t size);
+int netxen_flash_unlock(struct netxen_adapter *adapter);
+int netxen_backup_crbinit(struct netxen_adapter *adapter);
+int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
+int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+
 int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
 int netxen_rom_se(struct netxen_adapter *adapter, int addr);
 int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
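
The new constants describe the flash geometry assumed by the flashing code below: 64 sectors of 64 KiB each, 4 MiB total, with sector n starting at n * FLASH_SECTOR_SIZE. A quick standalone check of that arithmetic:

	#include <stdio.h>

	#define NUM_FLASH_SECTORS (64)
	#define FLASH_SECTOR_SIZE (64 * 1024)
	#define FLASH_TOTAL_SIZE  (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)

	int main(void)
	{
		printf("total: %d bytes\n", FLASH_TOTAL_SIZE);	/* 4194304 */
		/* sector 63, the CRBinit backup target in netxen_nic_init.c: */
		printf("sector 63 at 0x%x\n", 63 * FLASH_SECTOR_SIZE);	/* 0x3f0000 */
		return 0;
	}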

+ 81 - 15
drivers/net/netxen/netxen_nic_ethtool.c

@@ -32,6 +32,7 @@
  */

 #include <linux/types.h>
+#include <linux/delay.h>
 #include <asm/uaccess.h>
 #include <linux/pci.h>
 #include <asm/io.h>
@@ -94,17 +95,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {

 static int netxen_nic_get_eeprom_len(struct net_device *dev)
 {
-	struct netxen_port *port = netdev_priv(dev);
-	struct netxen_adapter *adapter = port->adapter;
-	int n;
-
-	if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
-	    && (n & NETXEN_ROM_ROUNDUP)) {
-		n &= ~NETXEN_ROM_ROUNDUP;
-		if (n < NETXEN_MAX_EEPROM_LEN)
-			return n;
-	}
-	return 0;
+	return FLASH_TOTAL_SIZE;
 }

 static void
@@ -440,18 +431,92 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	struct netxen_port *port = netdev_priv(dev);
 	struct netxen_adapter *adapter = port->adapter;
 	int offset;
+	int ret;

 	if (eeprom->len == 0)
 		return -EINVAL;

 	eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
-	for (offset = 0; offset < eeprom->len; offset++)
-		if (netxen_rom_fast_read
-		    (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1)
-			return -EIO;
+	offset = eeprom->offset;
+
+	ret = netxen_rom_fast_read_words(adapter, offset, bytes,
+						eeprom->len);
+	if (ret < 0)
+		return ret;
+
 	return 0;
 }

+static int
+netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+			u8 * bytes)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	int offset = eeprom->offset;
+	static int flash_start;
+	static int ready_to_flash;
+	int ret;
+
+	if (flash_start == 0) {
+		ret = netxen_flash_unlock(adapter);
+		if (ret < 0) {
+			printk(KERN_ERR "%s: Flash unlock failed.\n",
+				netxen_nic_driver_name);
+			return ret;
+		}
+		printk(KERN_INFO "%s: flash unlocked. \n", 
+			netxen_nic_driver_name);
+		ret = netxen_flash_erase_secondary(adapter);
+		if (ret != FLASH_SUCCESS) {
+			printk(KERN_ERR "%s: Flash erase failed.\n", 
+				netxen_nic_driver_name);
+			return ret;
+		}
+		printk(KERN_INFO "%s: secondary flash erased successfully.\n", 
+			netxen_nic_driver_name);
+		flash_start = 1;
+		return 0;
+	}
+
+	if (offset == BOOTLD_START) {
+		ret = netxen_flash_erase_primary(adapter);
+		if (ret != FLASH_SUCCESS) {
+			printk(KERN_ERR "%s: Flash erase failed.\n", 
+				netxen_nic_driver_name);
+			return ret;
+		}
+
+		ret = netxen_rom_se(adapter, USER_START);
+		if (ret != FLASH_SUCCESS)
+			return ret;
+		ret = netxen_rom_se(adapter, FIXED_START);
+		if (ret != FLASH_SUCCESS)
+			return ret;
+
+		printk(KERN_INFO "%s: primary flash erased successfully\n", 
+			netxen_nic_driver_name);
+
+		ret = netxen_backup_crbinit(adapter);
+		if (ret != FLASH_SUCCESS) {
+			printk(KERN_ERR "%s: CRBinit backup failed.\n", 
+				netxen_nic_driver_name);
+			return ret;
+		}
+		printk(KERN_INFO "%s: CRBinit backup done.\n", 
+			netxen_nic_driver_name);
+		ready_to_flash = 1;
+	}
+
+	if (!ready_to_flash) {
+		printk(KERN_ERR "%s: Invalid write sequence, returning...\n",
+			netxen_nic_driver_name);
+		return -EINVAL;
+	}
+
+	return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
+}
+
 static void
 netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
 {
@@ -721,6 +786,7 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
 	.get_link = netxen_nic_get_link,
 	.get_eeprom_len = netxen_nic_get_eeprom_len,
 	.get_eeprom = netxen_nic_get_eeprom,
+	.set_eeprom = netxen_nic_set_eeprom,
 	.get_ringparam = netxen_nic_get_ringparam,
 	.get_pauseparam = netxen_nic_get_pauseparam,
 	.set_pauseparam = netxen_nic_set_pauseparam,
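
netxen_nic_set_eeprom() enforces a strict flashing sequence: the first write unlocks the flash and erases the secondary region, a write at BOOTLD_START erases the primary region and backs up CRBinit, and only then are payload writes accepted. A userspace model of that sequencing (the BOOTLD_START value below is a placeholder; the real one comes from the driver headers):

	#include <stdio.h>

	enum { BOOTLD_START = 0x10000 };	/* placeholder value */

	static int flash_start, ready_to_flash;

	static int set_eeprom(int offset)
	{
		if (!flash_start) {	/* 1st write: unlock + erase secondary */
			flash_start = 1;
			return 0;
		}
		if (offset == BOOTLD_START)	/* erase primary, back up CRBinit */
			ready_to_flash = 1;
		if (!ready_to_flash)	/* out-of-order writes are rejected */
			return -1;
		return 0;		/* payload goes to the word writer */
	}

	int main(void)
	{
		printf("%d\n", set_eeprom(0));			/* 0 */
		printf("%d\n", set_eeprom(0x2000));		/* -1 */
		printf("%d\n", set_eeprom(BOOTLD_START));	/* 0 */
		return 0;
	}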

+ 274 - 5
drivers/net/netxen/netxen_nic_init.c

@@ -110,6 +110,7 @@ static void crb_addr_transform_setup(void)
 	crb_addr_transform(CAM);
 	crb_addr_transform(C2C1);
 	crb_addr_transform(C2C0);
+	crb_addr_transform(SMB);
 }

 int netxen_init_firmware(struct netxen_adapter *adapter)
@@ -276,6 +277,7 @@ unsigned long netxen_decode_crb_addr(unsigned long addr)

 static long rom_max_timeout = 10000;
 static long rom_lock_timeout = 1000000;
+static long rom_write_timeout = 700;

 static inline int rom_lock(struct netxen_adapter *adapter)
 {
@@ -404,7 +406,7 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
 {
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
-	udelay(100);		/* prevent bursting on CRB */
+	udelay(70);		/* prevent bursting on CRB */
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
 	if (netxen_wait_rom_done(adapter)) {
@@ -413,13 +415,46 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
 	}
 	/* reset abyte_cnt and dummy_byte_cnt */
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
-	udelay(100);		/* prevent bursting on CRB */
+	udelay(70);		/* prevent bursting on CRB */
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

 	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
 	return 0;
 }

+static inline int 
+do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+			u8 *bytes, size_t size)
+{
+	int addridx;
+	int ret = 0;
+
+	for (addridx = addr; addridx < (addr + size); addridx += 4) {
+		ret = do_rom_fast_read(adapter, addridx, (int *)bytes);
+		if (ret != 0)
+			break;
+		bytes += 4;
+	}
+
+	return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, 
+				u8 *bytes, size_t size)
+{
+	int ret;
+
+	ret = rom_lock(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
 int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
 {
 	int ret;
@@ -443,6 +478,152 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
 	netxen_rom_unlock(adapter);
 	return ret;
 }
+
+static inline int do_rom_fast_write_words(struct netxen_adapter *adapter, 
+						int addr, u8 *bytes, size_t size)
+{
+	int addridx = addr;
+	int ret = 0;
+
+	while (addridx < (addr + size)) {
+		int last_attempt = 0;
+		int timeout = 0;
+		int data;
+
+		data = *(u32*)bytes;
+
+		ret = do_rom_fast_write(adapter, addridx, data);
+		if (ret < 0)
+			return ret;
+			
+		while(1) {
+			int data1;
+
+			do_rom_fast_read(adapter, addridx, &data1);
+			if (data1 == data)
+				break;
+
+			if (timeout++ >= rom_write_timeout) {
+				if (last_attempt++ < 4) {
+					ret = do_rom_fast_write(adapter, 
+								addridx, data);
+					if (ret < 0)
+						return ret;
+				}
+				else {
+					printk(KERN_INFO "Data write did not "
+					   "succeed at address 0x%x\n", addridx);
+					break;
+				}
+			}
+		}
+
+		bytes += 4;
+		addridx += 4;
+	}
+
+	return ret;
+}
+
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, 
+					u8 *bytes, size_t size)
+{
+	int ret = 0;
+
+	ret = rom_lock(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = do_rom_fast_write_words(adapter, addr, bytes, size);
+	netxen_rom_unlock(adapter);
+
+	return ret;
+}
+
+int netxen_rom_wrsr(struct netxen_adapter *adapter, int data)
+{
+	int ret;
+
+	ret = netxen_rom_wren(adapter);
+	if (ret < 0)
+		return ret;
+
+	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
+	netxen_crb_writelit_adapter(adapter, 
+					NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+
+	ret = netxen_wait_rom_done(adapter);
+	if (ret < 0)
+		return ret;
+
+	return netxen_rom_wip_poll(adapter);
+}
+
+int netxen_rom_rdsr(struct netxen_adapter *adapter)
+{
+	int ret;
+
+	ret = rom_lock(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = netxen_do_rom_rdsr(adapter);
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
+int netxen_backup_crbinit(struct netxen_adapter *adapter)
+{
+	int ret = FLASH_SUCCESS;
+	int val;
+	char *buffer = kmalloc(FLASH_SECTOR_SIZE, GFP_KERNEL);
+
+	if (!buffer)
+		return -ENOMEM;	
+	/* unlock sector 63 */
+	val = netxen_rom_rdsr(adapter);
+	val = val & 0xe3;
+	ret = netxen_rom_wrsr(adapter, val);
+	if (ret != FLASH_SUCCESS)
+		goto out_kfree;
+
+	ret = netxen_rom_wip_poll(adapter);
+	if (ret != FLASH_SUCCESS)
+		goto out_kfree;
+
+	/* copy  sector 0 to sector 63 */
+	ret = netxen_rom_fast_read_words(adapter, CRBINIT_START, 
+						buffer, FLASH_SECTOR_SIZE);
+	if (ret != FLASH_SUCCESS)
+		goto out_kfree;
+
+	ret = netxen_rom_fast_write_words(adapter, FIXED_START, 
+						buffer, FLASH_SECTOR_SIZE);
+	if (ret != FLASH_SUCCESS)
+		goto out_kfree;
+
+	/* lock sector 63 */
+	val = netxen_rom_rdsr(adapter);
+	if (!(val & 0x8)) {
+		val |= (0x1 << 2);
+		/* lock sector 63 */
+		if (netxen_rom_wrsr(adapter, val) == 0) {
+			ret = netxen_rom_wip_poll(adapter);
+			if (ret != FLASH_SUCCESS)
+				goto out_kfree;
+
+			/* lock SR writes */
+			ret = netxen_rom_wip_poll(adapter);
+			if (ret != FLASH_SUCCESS)
+				goto out_kfree;
+		}
+	}
+
+out_kfree:
+	kfree(buffer);
+	return ret;
+}
+
 int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
 {
 	netxen_rom_wren(adapter);
@@ -457,6 +638,27 @@ int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
 	return netxen_rom_wip_poll(adapter);
 }

+void check_erased_flash(struct netxen_adapter *adapter, int addr)
+{
+	int i;
+	int val;
+	int count = 0, erased_errors = 0;
+	int range;
+
+	range = (addr == USER_START) ? FIXED_START : addr + FLASH_SECTOR_SIZE;
+	
+	for (i = addr; i < range; i += 4) {
+		netxen_rom_fast_read(adapter, i, &val);
+		if (val != 0xffffffff)
+			erased_errors++;
+		count++;
+	}
+
+	if (erased_errors)
+		printk(KERN_INFO "0x%x out of 0x%x words fail to be erased "
+			"for sector address: %x\n", erased_errors, count, addr);
+}
+
 int netxen_rom_se(struct netxen_adapter *adapter, int addr)
 {
 	int ret = 0;
@@ -465,6 +667,68 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr)
 	}
 	ret = netxen_do_rom_se(adapter, addr);
 	netxen_rom_unlock(adapter);
+	msleep(30);
+	check_erased_flash(adapter, addr);
+
+	return ret;
+}
+
+int
+netxen_flash_erase_sections(struct netxen_adapter *adapter, int start, int end)
+{
+	int ret = FLASH_SUCCESS;
+	int i;
+
+	for (i = start; i < end; i++) {
+		ret = netxen_rom_se(adapter, i * FLASH_SECTOR_SIZE);
+		if (ret)
+			break;
+		ret = netxen_rom_wip_poll(adapter);
+		if (ret < 0)
+			return ret;
+	}
+
+	return ret;
+}
+
+int
+netxen_flash_erase_secondary(struct netxen_adapter *adapter)
+{
+	int ret = FLASH_SUCCESS;
+	int start, end;
+
+	start = SECONDARY_START / FLASH_SECTOR_SIZE;
+	end   = USER_START / FLASH_SECTOR_SIZE;
+	ret = netxen_flash_erase_sections(adapter, start, end);
+
+	return ret;
+}
+
+int
+netxen_flash_erase_primary(struct netxen_adapter *adapter)
+{
+	int ret = FLASH_SUCCESS;
+	int start, end;
+
+	start = PRIMARY_START / FLASH_SECTOR_SIZE;
+	end   = SECONDARY_START / FLASH_SECTOR_SIZE;
+	ret = netxen_flash_erase_sections(adapter, start, end);
+
+	return ret;
+}
+
+int netxen_flash_unlock(struct netxen_adapter *adapter)
+{
+	int ret = 0;
+
+	ret = netxen_rom_wrsr(adapter, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = netxen_rom_wren(adapter);
+	if (ret < 0)
+		return ret;
+
 	return ret;
 }

@@ -543,9 +807,13 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 		}
 		for (i = 0; i < n; i++) {

-			off =
-			    netxen_decode_crb_addr((unsigned long)buf[i].addr) +
-			    NETXEN_PCI_CRBSPACE;
+			off = netxen_decode_crb_addr((unsigned long)buf[i].addr);
+			if (off == NETXEN_ADDR_ERROR) {
+				printk(KERN_ERR"CRB init value out of range %lx\n",
+					buf[i].addr);
+				continue;
+			}
+			off += NETXEN_PCI_CRBSPACE;
 			/* skipping cold reboot MAGIC */
 			if (off == NETXEN_CAM_RAM(0x1fc))
 				continue;
@@ -662,6 +930,7 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
 	int loops = 0;

 	if (!pegtune_val) {
+		val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
 		while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
 			udelay(100);
 			schedule();
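
do_rom_fast_write_words() above writes each word, polls the readback until it matches, and retries the write up to four more times before logging a failure. The shape of that loop, modeled standalone (array-backed storage; the 700-iteration poll bound mirrors rom_write_timeout):

	#include <stdio.h>

	static unsigned int flash[16];	/* stand-in for the ROM */

	static void rom_write(int addr, unsigned int data) { flash[addr] = data; }
	static unsigned int rom_read(int addr) { return flash[addr]; }

	static int write_verified(int addr, unsigned int data)
	{
		int attempt;

		for (attempt = 0; attempt < 5; attempt++) {
			int timeout;

			rom_write(addr, data);
			for (timeout = 0; timeout < 700; timeout++)
				if (rom_read(addr) == data)
					return 0;	/* readback matches */
		}
		return -1;	/* data never stuck; the driver logs the address */
	}

	int main(void)
	{
		printf("%d\n", write_verified(3, 0xdeadbeef));	/* prints 0 */
		return 0;
	}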

+ 0 - 666
drivers/net/oaknet.c

@@ -1,666 +0,0 @@
-/*
- *
- *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- *    Module name: oaknet.c
- *
- *    Description:
- *      Driver for the National Semiconductor DP83902AV Ethernet controller
- *      on-board the IBM PowerPC "Oak" evaluation board. Adapted from the
- *      various other 8390 drivers written by Donald Becker and Paul Gortmaker.
- *
- *      Additional inspiration from the "tcd8390.c" driver from TiVo, Inc.
- *      and "enetLib.c" from IBM.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-
-#include <asm/board.h>
-#include <asm/io.h>
-
-#include "8390.h"
-
-
-/* Preprocessor Defines */
-
-#if !defined(TRUE) || TRUE != 1
-#define	TRUE	1
-#endif
-
-#if !defined(FALSE) || FALSE != 0
-#define	FALSE	0
-#endif
-
-#define	OAKNET_START_PG		0x20	/* First page of TX buffer */
-#define	OAKNET_STOP_PG		0x40	/* Last pagge +1 of RX ring */
-
-#define	OAKNET_WAIT		(2 * HZ / 100)	/* 20 ms */
-
-/* Experimenting with some fixes for a broken driver... */
-
-#define	OAKNET_DISINT
-#define	OAKNET_HEADCHECK
-#define	OAKNET_RWFIX
-
-
-/* Global Variables */
-
-static const char *name = "National DP83902AV";
-
-static struct net_device *oaknet_devs;
-
-
-/* Function Prototypes */
-
-static int	 oaknet_open(struct net_device *dev);
-static int	 oaknet_close(struct net_device *dev);
-
-static void	 oaknet_reset_8390(struct net_device *dev);
-static void	 oaknet_get_8390_hdr(struct net_device *dev,
-				     struct e8390_pkt_hdr *hdr, int ring_page);
-static void	 oaknet_block_input(struct net_device *dev, int count,
-				    struct sk_buff *skb, int ring_offset);
-static void	 oaknet_block_output(struct net_device *dev, int count,
-				     const unsigned char *buf, int start_page);
-
-static void	 oaknet_dma_error(struct net_device *dev, const char *name);
-
-
-/*
- * int oaknet_init()
- *
- * Description:
- *   This routine performs all the necessary platform-specific initiali-
- *   zation and set-up for the IBM "Oak" evaluation board's National
- *   Semiconductor DP83902AV "ST-NIC" Ethernet controller.
- *
- * Input(s):
- *   N/A
- *
- * Output(s):
- *   N/A
- *
- * Returns:
- *   0 if OK, otherwise system error number on error.
- *
- */
-static int __init oaknet_init(void)
-{
-	register int i;
-	int reg0, regd;
-	int ret = -ENOMEM;
-	struct net_device *dev;
-#if 0
-	unsigned long ioaddr = OAKNET_IO_BASE;
-#else
-	unsigned long ioaddr = ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE);
-#endif
-	bd_t *bip = (bd_t *)__res;
-
-	if (!ioaddr)
-		return -ENOMEM;
-
-	dev = alloc_ei_netdev();
-	if (!dev)
-		goto out_unmap;
-
-	ret = -EBUSY;
-	if (!request_region(OAKNET_IO_BASE, OAKNET_IO_SIZE, name))
-		goto out_dev;
-
-	/* Quick register check to see if the device is really there. */
-
-	ret = -ENODEV;
-	if ((reg0 = ei_ibp(ioaddr)) == 0xFF)
-		goto out_region;
-
-	/*
-	 * That worked. Now a more thorough check, using the multicast
-	 * address registers, that the device is definitely out there
-	 * and semi-functional.
-	 */
-
-	ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
-	regd = ei_ibp(ioaddr + 0x0D);
-	ei_obp(0xFF, ioaddr + 0x0D);
-	ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
-	ei_ibp(ioaddr + EN0_COUNTER0);
-
-	/* It's no good. Fix things back up and leave. */
-
-	ret = -ENODEV;
-	if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) {
-		ei_obp(reg0, ioaddr);
-		ei_obp(regd, ioaddr + 0x0D);
-		goto out_region;
-	}
-
-	SET_MODULE_OWNER(dev);
-
-	/*
-	 * This controller is on an embedded board, so the base address
-	 * and interrupt assignments are pre-assigned and unchageable.
-	 */
-
-	dev->base_addr = ioaddr;
-	dev->irq = OAKNET_INT;
-
-	/*
-	 * Disable all chip interrupts for now and ACK all pending
-	 * interrupts.
-	 */
-
-	ei_obp(0x0, ioaddr + EN0_IMR);
-	ei_obp(0xFF, ioaddr + EN0_ISR);
-
-	/* Attempt to get the interrupt line */
-
-	ret = -EAGAIN;
-	if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
-		printk("%s: unable to request interrupt %d.\n",
-		       name, dev->irq);
-		goto out_region;
-	}
-
-	/* Tell the world about what and where we've found. */
-
-	printk("%s: %s at", dev->name, name);
-	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
-		dev->dev_addr[i] = bip->bi_enetaddr[i];
-		printk("%c%.2x", (i ? ':' : ' '), dev->dev_addr[i]);
-	}
-	printk(", found at %#lx, using IRQ %d.\n", dev->base_addr, dev->irq);
-
-	/* Set up some required driver fields and then we're done. */
-
-	ei_status.name		= name;
-	ei_status.word16	= FALSE;
-	ei_status.tx_start_page	= OAKNET_START_PG;
-	ei_status.rx_start_page = OAKNET_START_PG + TX_PAGES;
-	ei_status.stop_page	= OAKNET_STOP_PG;
-
-	ei_status.reset_8390	= &oaknet_reset_8390;
-	ei_status.block_input	= &oaknet_block_input;
-	ei_status.block_output	= &oaknet_block_output;
-	ei_status.get_8390_hdr	= &oaknet_get_8390_hdr;
-
-	dev->open = oaknet_open;
-	dev->stop = oaknet_close;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ei_poll;
-#endif
-
-	NS8390_init(dev, FALSE);
-	ret = register_netdev(dev);
-	if (ret)
-		goto out_irq;
-
-	oaknet_devs = dev;
-	return 0;
-
-out_irq;
-	free_irq(dev->irq, dev);
-out_region:
-	release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
-out_dev:
-	free_netdev(dev);
-out_unmap:
-	iounmap(ioaddr);
-	return ret;
-}
-
-/*
- * static int oaknet_open()
- *
- * Description:
- *   This routine is a modest wrapper around ei_open, the 8390-generic,
- *   driver open routine. This just increments the module usage count
- *   and passes along the status from ei_open.
- *
- * Input(s):
- *  *dev - Pointer to the device structure for this driver.
- *
- * Output(s):
- *  *dev - Pointer to the device structure for this driver, potentially
- *         modified by ei_open.
- *
- * Returns:
- *   0 if OK, otherwise < 0 on error.
- *
- */
-static int
-oaknet_open(struct net_device *dev)
-{
-	int status = ei_open(dev);
-	return (status);
-}
-
-/*
- * static int oaknet_close()
- *
- * Description:
- *   This routine is a modest wrapper around ei_close, the 8390-generic,
- *   driver close routine. This just decrements the module usage count
- *   and passes along the status from ei_close.
- *
- * Input(s):
- *  *dev - Pointer to the device structure for this driver.
- *
- * Output(s):
- *  *dev - Pointer to the device structure for this driver, potentially
- *         modified by ei_close.
- *
- * Returns:
- *   0 if OK, otherwise < 0 on error.
- *
- */
-static int
-oaknet_close(struct net_device *dev)
-{
-	int status = ei_close(dev);
-	return (status);
-}
-
-/*
- * static void oaknet_reset_8390()
- *
- * Description:
- *   This routine resets the DP83902 chip.
- *
- * Input(s):
- *  *dev - Pointer to the device structure for this driver.
- *
- * Output(s):
- *   N/A
- *
- * Returns:
- *   N/A
- *
- */
-static void
-oaknet_reset_8390(struct net_device *dev)
-{
-	int base = E8390_BASE;
-
-	/*
-	 * We have no provision of reseting the controller as is done
-	 * in other drivers, such as "ne.c". However, the following
-	 * seems to work well enough in the TiVo driver.
-	 */
-
-	printk("Resetting %s...\n", dev->name);
-	ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD);
-	ei_status.txing = 0;
-	ei_status.dmaing = 0;
-}
-
-/*
- * static void oaknet_get_8390_hdr()
- *
- * Description:
- *   This routine grabs the 8390-specific header. It's similar to the
- *   block input routine, but we don't need to be concerned with ring wrap
- *   as the header will be at the start of a page, so we optimize accordingly.
- *
- * Input(s):
- *  *dev       - Pointer to the device structure for this driver.
- *  *hdr       - Pointer to storage for the 8390-specific packet header.
- *   ring_page - ?
- *
- * Output(s):
- *  *hdr       - Pointer to the 8390-specific packet header for the just-
- *               received frame.
- *
- * Returns:
- *   N/A
- *
- */
-static void
-oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-		    int ring_page)
-{
-	int base = dev->base_addr;
-
-	/*
-	 * This should NOT happen. If it does, it is the LAST thing you'll
-	 * see.
-	 */
-
-	if (ei_status.dmaing) {
-		oaknet_dma_error(dev, "oaknet_get_8390_hdr");
-		return;
-	}
-
-	ei_status.dmaing |= 0x01;
-	outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD);
-	outb_p(sizeof(struct e8390_pkt_hdr), base + EN0_RCNTLO);
-	outb_p(0, base + EN0_RCNTHI);
-	outb_p(0, base + EN0_RSARLO);		/* On page boundary */
-	outb_p(ring_page, base + EN0_RSARHI);
-	outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD);
-
-	if (ei_status.word16)
-		insw(base + OAKNET_DATA, hdr,
-		     sizeof(struct e8390_pkt_hdr) >> 1);
-	else
-		insb(base + OAKNET_DATA, hdr,
-		     sizeof(struct e8390_pkt_hdr));
-
-	/* Byte-swap the packet byte count */
-
-	hdr->count = le16_to_cpu(hdr->count);
-
-	outb_p(ENISR_RDC, base + EN0_ISR);	/* ACK Remote DMA interrupt */
-	ei_status.dmaing &= ~0x01;
-}
-
-/*
- * XXX - Document me.
- */
-static void
-oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb,
-		   int ring_offset)
-{
-	int base = OAKNET_BASE;
-	char *buf = skb->data;
-
-	/*
-	 * This should NOT happen. If it does, it is the LAST thing you'll
-	 * see.
-	 */
-
-	if (ei_status.dmaing) {
-		oaknet_dma_error(dev, "oaknet_block_input");
-		return;
-	}
-
-#ifdef OAKNET_DISINT
-	save_flags(flags);
-	cli();
-#endif
-
-	ei_status.dmaing |= 0x01;
-	ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD);
-	ei_obp(count & 0xff, base + EN0_RCNTLO);
-	ei_obp(count >> 8, base + EN0_RCNTHI);
-	ei_obp(ring_offset & 0xff, base + EN0_RSARLO);
-	ei_obp(ring_offset >> 8, base + EN0_RSARHI);
-	ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
-	if (ei_status.word16) {
-		ei_isw(base + E8390_DATA, buf, count >> 1);
-		if (count & 0x01) {
-			buf[count - 1] = ei_ib(base + E8390_DATA);
-#ifdef OAKNET_HEADCHECK
-			bytes++;
-#endif
-		}
-	} else {
-		ei_isb(base + E8390_DATA, buf, count);
-	}
-#ifdef OAKNET_HEADCHECK
-	/*
-	 * This was for the ALPHA version only, but enough people have
-	 * been encountering problems so it is still here.  If you see
-	 * this message you either 1) have a slightly incompatible clone
-	 * or 2) have noise/speed problems with your bus.
-	 */
-
-	/* DMA termination address check... */
-	{
-		int addr, tries = 20;
-		do {
-			/* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here
-			   -- it's broken for Rx on some cards! */
-			int high = ei_ibp(base + EN0_RSARHI);
-			int low = ei_ibp(base + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if (((ring_offset + bytes) & 0xff) == low)
-				break;
-		} while (--tries > 0);
-	 	if (tries <= 0)
-			printk("%s: RX transfer address mismatch,"
-			       "%#4.4x (expected) vs. %#4.4x (actual).\n",
-			       dev->name, ring_offset + bytes, addr);
-	}
-#endif
-	ei_obp(ENISR_RDC, base + EN0_ISR);	/* ACK Remote DMA interrupt */
-	ei_status.dmaing &= ~0x01;
-
-#ifdef OAKNET_DISINT
-	restore_flags(flags);
-#endif
-}
-
-/*
- * static void oaknet_block_output()
- *
- * Description:
- *   This routine...
- *
- * Input(s):
- *  *dev        - Pointer to the device structure for this driver.
- *   count      - Number of bytes to be transferred.
- *  *buf        -
- *   start_page -
- *
- * Output(s):
- *   N/A
- *
- * Returns:
- *   N/A
- *
- */
-static void
-oaknet_block_output(struct net_device *dev, int count,
-		    const unsigned char *buf, int start_page)
-{
-	int base = E8390_BASE;
-#if 0
-	int bug;
-#endif
-	unsigned long start;
-#ifdef OAKNET_DISINT
-	unsigned long flags;
-#endif
-#ifdef OAKNET_HEADCHECK
-	int retries = 0;
-#endif
-
-	/* Round the count up for word writes. */
-
-	if (ei_status.word16 && (count & 0x1))
-		count++;
-
-	/*
-	 * This should NOT happen. If it does, it is the LAST thing you'll
-	 * see.
-	 */
-
-	if (ei_status.dmaing) {
-		oaknet_dma_error(dev, "oaknet_block_output");
-		return;
-	}
-
-#ifdef OAKNET_DISINT
-	save_flags(flags);
-	cli();
-#endif
-
-	ei_status.dmaing |= 0x01;
-
-	/* Make sure we are in page 0. */
-
-	ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD);
-
-#ifdef OAKNET_HEADCHECK
-retry:
-#endif
-
-#if 0
-	/*
-	 * The 83902 documentation states that the processor needs to
-	 * do a "dummy read" before doing the remote write to work
-	 * around a chip bug they don't feel like fixing.
-	 */
-
-	bug = 0;
-	while (1) {
-		unsigned int rdhi;
-		unsigned int rdlo;
-
-		/* Now the normal output. */
-		ei_obp(ENISR_RDC, base + EN0_ISR);
-		ei_obp(count & 0xff, base + EN0_RCNTLO);
-		ei_obp(count >> 8,   base + EN0_RCNTHI);
-		ei_obp(0x00, base + EN0_RSARLO);
-		ei_obp(start_page, base + EN0_RSARHI);
-
-		if (bug++)
-			break;
-
-		/* Perform the dummy read */
-		rdhi = ei_ibp(base + EN0_CRDAHI);
-		rdlo = ei_ibp(base + EN0_CRDALO);
-		ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
-
-		while (1) {
-			unsigned int nrdhi;
-			unsigned int nrdlo;
-			nrdhi = ei_ibp(base + EN0_CRDAHI);
-			nrdlo = ei_ibp(base + EN0_CRDALO);
-			if ((rdhi != nrdhi) || (rdlo != nrdlo))
-				break;
-		}
-	}
-#else
-#ifdef OAKNET_RWFIX
-	/*
-	 * Handle the read-before-write bug the same way as the
-	 * Crynwr packet driver -- the Nat'l Semi. method doesn't work.
-	 * Actually this doesn't always work either, but if you have
-	 * problems with your 83902 this is better than nothing!
-	 */
-
-	ei_obp(0x42, base + EN0_RCNTLO);
-	ei_obp(0x00, base + EN0_RCNTHI);
-	ei_obp(0x42, base + EN0_RSARLO);
-	ei_obp(0x00, base + EN0_RSARHI);
-	ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
-	/* Make certain that the dummy read has occurred. */
-	udelay(6);
-#endif
-
-	ei_obp(ENISR_RDC, base + EN0_ISR);
-
-	/* Now the normal output. */
-	ei_obp(count & 0xff, base + EN0_RCNTLO);
-	ei_obp(count >> 8,   base + EN0_RCNTHI);
-	ei_obp(0x00, base + EN0_RSARLO);
-	ei_obp(start_page, base + EN0_RSARHI);
-#endif /* 0/1 */
-
-	ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD);
-	if (ei_status.word16) {
-		ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1);
-	} else {
-		ei_osb(E8390_BASE + E8390_DATA, buf, count);
-	}
-
-#ifdef OAKNET_DISINT
-	restore_flags(flags);
-#endif
-
-	start = jiffies;
-
-#ifdef OAKNET_HEADCHECK
-	/*
-	 * This was for the ALPHA version only, but enough people have
-	 * been encountering problems so it is still here.
-	 */
-
-	{
-		/* DMA termination address check... */
-		int addr, tries = 20;
-		do {
-			int high = ei_ibp(base + EN0_RSARHI);
-			int low = ei_ibp(base + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if ((start_page << 8) + count == addr)
-				break;
-		} while (--tries > 0);
-
-		if (tries <= 0) {
-			printk("%s: Tx packet transfer address mismatch,"
-			       "%#4.4x (expected) vs. %#4.4x (actual).\n",
-			       dev->name, (start_page << 8) + count, addr);
-			if (retries++ == 0)
-				goto retry;
-		}
-	}
-#endif
-
-	while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
-		if (time_after(jiffies, start + OAKNET_WAIT)) {
-			printk("%s: timeout waiting for Tx RDC.\n", dev->name);
-			oaknet_reset_8390(dev);
-			NS8390_init(dev, TRUE);
-			break;
-		}
-	}
-
-	ei_obp(ENISR_RDC, base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-/*
- * static void oaknet_dma_error()
- *
- * Description:
- *   This routine prints out a last-ditch informative message to the console
- *   indicating that a DMA error occurred. If you see this, it's the last
- *   thing you'll see.
- *
- * Input(s):
- *  *dev  - Pointer to the device structure for this driver.
- *  *name - Informative text (e.g. function name) indicating where the
- *          DMA error occurred.
- *
- * Output(s):
- *   N/A
- *
- * Returns:
- *   N/A
- *
- */
-static void
-oaknet_dma_error(struct net_device *dev, const char *name)
-{
-	printk(KERN_EMERG "%s: DMAing conflict in %s."
-	       "[DMAstat:%d][irqlock:%d][intr:%ld]\n",
-	       dev->name, name, ei_status.dmaing, ei_status.irqlock,
-	       dev->interrupt);
-}
-
-/*
- * Oak Ethernet module unload interface.
- */
-static void __exit oaknet_cleanup_module (void)
-{
-	/* Convert to loop once driver supports multiple devices. */
-	unregister_netdev(oaknet_dev);
-	free_irq(oaknet_devs->irq, oaknet_devs);
-	release_region(oaknet_devs->base_addr, OAKNET_IO_SIZE);
-	iounmap(ioaddr);
-	free_netdev(oaknet_devs);
-}
-
-module_init(oaknet_init);
-module_exit(oaknet_cleanup_module);
-MODULE_LICENSE("GPL");

+ 1019 - 0
drivers/net/pasemi_mac.c

@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+#include "pasemi_mac.h"
+
+
+/* TODO list
+ *
+ * - Get rid of pci_{read,write}_config(), map registers with ioremap
+ *   for performance
+ * - PHY support
+ * - Multicast support
+ * - Large MTU support
+ * - Other performance improvements
+ */
+
+
+/* Must be a power of two */
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+
+#define TX_DESC(mac, num)	((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
+#define TX_DESC_INFO(mac, num)	((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
+#define RX_DESC(mac, num)	((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
+#define RX_DESC_INFO(mac, num)	((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
+#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
+
+#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
+
+/* XXXOJN these should come out of the device tree some day */
+#define PAS_DMA_CAP_BASE   0xe00d0040
+#define PAS_DMA_CAP_SIZE   0x100
+#define PAS_DMA_COM_BASE   0xe00d0100
+#define PAS_DMA_COM_SIZE   0x100
+
+static struct pasdma_status *dma_status;
+
+static int pasemi_get_mac_addr(struct pasemi_mac *mac)
+{
+	struct pci_dev *pdev = mac->pdev;
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	const u8 *maddr;
+	u8 addr[6];
+
+	if (!dn) {
+		dev_dbg(&pdev->dev,
+			  "No device node for mac, not configuring\n");
+		return -ENOENT;
+	}
+
+	maddr = get_property(dn, "mac-address", NULL);
+	if (maddr == NULL) {
+		dev_warn(&pdev->dev,
+			 "no mac address in device tree, not configuring\n");
+		return -ENOENT;
+	}
+
+	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+		dev_warn(&pdev->dev,
+			 "can't parse mac address, not configuring\n");
+		return -EINVAL;
+	}
+
+	memcpy(mac->mac_addr, addr, sizeof(addr));
+	return 0;
+}
+
+static int pasemi_mac_setup_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac_rxring *ring;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	int chan_id = mac->dma_rxch;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+				  RX_RING_SIZE, GFP_KERNEL);
+
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					RX_RING_SIZE *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+
+	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+					   RX_RING_SIZE * sizeof(u64),
+					   &ring->buf_dma, GFP_KERNEL);
+	if (!ring->buffers)
+		goto out_buffers;
+
+	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
+			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
+			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
+			       PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
+			       PAS_DMA_RXCHAN_CFG_HBU(1));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
+			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
+			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
+			       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
+
+	ring->next_to_fill = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s rx", dev->name);
+	mac->rx = ring;
+
+	return 0;
+
+out_buffers:
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  mac->rx->desc, mac->rx->dma);
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+
+static int pasemi_mac_setup_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	u32 val;
+	int chan_id = mac->dma_txch;
+	struct pasemi_mac_txring *ring;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		goto out_ring;
+
+	spin_lock_init(&ring->lock);
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+				  TX_RING_SIZE, GFP_KERNEL);
+	if (!ring->desc_info)
+		goto out_desc_info;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
+					TX_RING_SIZE *
+					sizeof(struct pas_dma_xct_descr),
+					&ring->dma, GFP_KERNEL);
+	if (!ring->desc)
+		goto out_desc;
+
+	memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
+			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
+
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
+			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
+			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+			       PAS_DMA_TXCHAN_CFG_UP |
+			       PAS_DMA_TXCHAN_CFG_WT(2));
+
+	ring->next_to_use = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s tx", dev->name);
+	mac->tx = ring;
+
+	return 0;
+
+out_desc:
+	kfree(ring->desc_info);
+out_desc_info:
+	kfree(ring);
+out_ring:
+	return -ENOMEM;
+}
+
+static void pasemi_mac_free_tx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		info = &TX_DESC_INFO(mac, i);
+		dp = &TX_DESC(mac, i);
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_TODEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = NULL;
+			dp->mactx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  mac->tx->desc, mac->tx->dma);
+
+	kfree(mac->tx->desc_info);
+	kfree(mac->tx);
+	mac->tx = NULL;
+}
+
+static void pasemi_mac_free_rx_resources(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		info = &RX_DESC_INFO(mac, i);
+		dp = &RX_DESC(mac, i);
+		if (info->dma) {
+			if (info->skb) {
+				pci_unmap_single(mac->dma_pdev,
+						 info->dma,
+						 info->skb->len,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb_any(info->skb);
+			}
+			info->dma = 0;
+			info->skb = NULL;
+			dp->macrx = 0;
+			dp->ptr = 0;
+		}
+	}
+
+	dma_free_coherent(&mac->dma_pdev->dev,
+			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
+			  mac->rx->desc, mac->rx->dma);
+
+	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
+			  mac->rx->buffers, mac->rx->buf_dma);
+
+	kfree(mac->rx->desc_info);
+	kfree(mac->rx);
+	mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int i;
+	int start = mac->rx->next_to_fill;
+	unsigned int count;
+
+	count = (mac->rx->next_to_clean + RX_RING_SIZE -
+		 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
+
+	/* Check to see if we're doing first-time setup */
+	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
+		count = RX_RING_SIZE;
+
+	if (count <= 0)
+		return;
+
+	for (i = start; i < start + count; i++) {
+		struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
+		u64 *buff = &RX_BUFF(mac, i);
+		struct sk_buff *skb;
+		dma_addr_t dma;
+
+		skb = dev_alloc_skb(BUF_SIZE);
+
+		if (!skb) {
+			count = i - start;
+			break;
+		}
+
+		skb->dev = dev;
+
+		dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
+				     PCI_DMA_FROMDEVICE);
+
+		if (dma_mapping_error(dma)) {
+			dev_kfree_skb_irq(info->skb);
+			count = i - start;
+			break;
+		}
+
+		info->skb = skb;
+		info->dma = dma;
+		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+	}
+
+	wmb();
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
+			       count);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_INCR(mac->dma_if),
+			       count);
+
+	mac->rx->next_to_fill += count;
+}
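+
+/* Worked example of the refill arithmetic above (an illustrative sketch,
+ * not part of the driver): with RX_RING_SIZE a power of two, the masked
+ * difference counts the consumed slots even after the raw indices wrap.
+ * E.g. next_to_clean = 130, next_to_fill = 126, RX_RING_SIZE = 512 gives
+ * (130 + 512 - 126) & 511 = 4 buffers to replenish.
+ */
+#if 0	/* illustration only */
+static unsigned int ring_slots_to_fill(unsigned int clean, unsigned int fill,
+				       unsigned int size)
+{
+	return (clean + size - fill) & (size - 1);	/* size: power of two */
+}
+#endif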
+
+static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
+{
+	unsigned int i;
+	int start, count;
+
+	spin_lock(&mac->rx->lock);
+
+	start = mac->rx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
+		struct pas_dma_xct_descr *dp;
+		struct pasemi_mac_buffer *info;
+		struct sk_buff *skb;
+		unsigned int j, len;
+		dma_addr_t dma;
+
+		rmb();
+
+		dp = &RX_DESC(mac, i);
+
+		if (!(dp->macrx & XCT_MACRX_O))
+			break;
+
+		count++;
+
+		info = NULL;
+
+		/* We have to scan for our skb since there's no way
+		 * to back-map it from the descriptor, and if we
+		 * have several receive channels then they might not
+		 * show up in the same order as they were put on the
+		 * interface ring.
+		 */
+
+		dma = (dp->ptr & XCT_PTR_ADDR_M);
+		for (j = start; j < (start + RX_RING_SIZE); j++) {
+			info = &RX_DESC_INFO(mac, j);
+			if (info->dma == dma)
+				break;
+		}
+
+		BUG_ON(!info);
+		BUG_ON(info->dma != dma);
+
+		pci_unmap_single(mac->dma_pdev, info->dma, BUF_SIZE,
+				 PCI_DMA_FROMDEVICE);
+
+		skb = info->skb;
+
+		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, mac->netdev);
+
+		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
+					   XCT_MACRX_CSUM_S;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+
+		mac->stats.rx_bytes += len;
+		mac->stats.rx_packets++;
+
+		netif_receive_skb(skb);
+
+		info->dma = 0;
+		info->skb = NULL;
+		dp->ptr = 0;
+		dp->macrx = 0;
+	}
+
+	mac->rx->next_to_clean += count;
+	pasemi_mac_replenish_rx_ring(mac->netdev);
+
+	spin_unlock(&mac->rx->lock);
+
+	return count;
+}
+
+static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
+{
+	int i;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	int start, count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mac->tx->lock, flags);
+
+	start = mac->tx->next_to_clean;
+	count = 0;
+
+	for (i = start; i < mac->tx->next_to_use; i++) {
+		dp = &TX_DESC(mac, i);
+		if (!dp || (dp->mactx & XCT_MACTX_O))
+			break;
+
+		count++;
+
+		info = &TX_DESC_INFO(mac, i);
+
+		pci_unmap_single(mac->dma_pdev, info->dma,
+				 info->skb->len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(info->skb);
+
+		info->skb = NULL;
+		info->dma = 0;
+		dp->mactx = 0;
+		dp->ptr = 0;
+	}
+	mac->tx->next_to_clean += count;
+	spin_unlock_irqrestore(&mac->tx->lock, flags);
+
+	return count;
+}
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+
+	if (!(*mac->rx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	netif_rx_schedule(dev);
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
+
+	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
+	      PAS_IOB_DMA_RXCH_RESET_DINTC;
+	if (*mac->rx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev,
+			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int reg;
+	int was_full;
+
+	was_full = mac->tx->next_to_use - mac->tx->next_to_clean == TX_RING_SIZE;
+
+	if (!(*mac->tx_status & PAS_STATUS_INT))
+		return IRQ_NONE;
+
+	pasemi_mac_clean_tx(mac);
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
+	if (*mac->tx_status & PAS_STATUS_TIMER)
+		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+			       reg);
+
+	if (was_full)
+		netif_wake_queue(dev);
+
+	return IRQ_HANDLED;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+	int ret;
+
+	/* enable rx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+			       PAS_DMA_COM_RXCMD_EN);
+
+	/* enable tx section */
+	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
+			       PAS_DMA_COM_TXCMD_EN);
+
+	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
+
+	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
+		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
+
+	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+
+	ret = pasemi_mac_setup_rx_resources(dev);
+	if (ret)
+		goto out_rx_resources;
+
+	ret = pasemi_mac_setup_tx_resources(dev);
+	if (ret)
+		goto out_tx_resources;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
+			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
+			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
+
+	/* enable rx if */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+			       PAS_DMA_RXINT_RCMDSTA_EN);
+
+	/* enable rx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+			       PAS_DMA_RXCHAN_CCMDSTA_EN |
+			       PAS_DMA_RXCHAN_CCMDSTA_DU);
+
+	/* enable tx channel */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	pasemi_mac_replenish_rx_ring(dev);
+
+	netif_start_queue(dev);
+	netif_poll_enable(dev);
+
+	ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
+			  &pasemi_mac_tx_intr, IRQF_DISABLED,
+			  mac->tx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + mac->dma_txch, ret);
+		goto out_tx_int;
+	}
+
+	ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
+			  &pasemi_mac_rx_intr, IRQF_DISABLED,
+			  mac->rx->irq_name, dev);
+	if (ret) {
+		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+		       mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
+		goto out_rx_int;
+	}
+
+	return 0;
+
+out_rx_int:
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+out_tx_int:
+	netif_poll_disable(dev);
+	netif_stop_queue(dev);
+	pasemi_mac_free_tx_resources(dev);
+out_tx_resources:
+	pasemi_mac_free_rx_resources(dev);
+out_rx_resources:
+
+	return ret;
+}
+
+#define MAX_RETRIES 5000
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int stat;
+	int retries;
+
+	netif_stop_queue(dev);
+
+	/* Clean out any pending buffers */
+	pasemi_mac_clean_tx(mac);
+	pasemi_mac_clean_rx(mac, RX_RING_SIZE);
+
+	/* Disable interface */
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+			       PAS_DMA_TXCHAN_TCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+		      PAS_DMA_RXINT_RCMDSTA_ST);
+	pci_write_config_dword(mac->dma_pdev,
+		      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+		      PAS_DMA_RXCHAN_CCMDSTA_ST);
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
+				      &stat);
+		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
+	}
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
+				      &stat);
+		if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
+	}
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(mac->dma_pdev,
+				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+				      &stat);
+		if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_RXINT_RCMDSTA_ACT) {
+		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
+	}
+
+	/* Then, disable the channel. This must be done separately from
+	 * stopping, since you can't disable when active.
+	 */
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+
+	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
+	free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
+
+	/* Free resources */
+	pasemi_mac_free_rx_resources(dev);
+	pasemi_mac_free_tx_resources(dev);
+
+	return 0;
+}
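+
+/* The three stop sequences above share one pattern: write the ST (stop)
+ * bit, then poll the same register until the ACT bit clears.  A minimal
+ * sketch of a common helper (illustrative only, not part of the driver):
+ */
+#if 0	/* illustration only */
+static int pasemi_mac_wait_idle(struct pci_dev *pdev, int reg, u32 act_bit)
+{
+	u32 stat;
+	int retries;
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		pci_read_config_dword(pdev, reg, &stat);
+		if (!(stat & act_bit))
+			return 0;	/* channel went idle */
+		cond_resched();
+	}
+	return -ETIMEDOUT;
+}
+#endif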
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	struct pasemi_mac_txring *txring;
+	struct pasemi_mac_buffer *info;
+	struct pas_dma_xct_descr *dp;
+	u64 dflags;
+	dma_addr_t map;
+	unsigned long flags;
+
+	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			dflags |= XCT_MACTX_CSUM_TCP;
+			dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		case IPPROTO_UDP:
+			dflags |= XCT_MACTX_CSUM_UDP;
+			dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
+			dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+			break;
+		}
+	}
+
+	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+	if (dma_mapping_error(map))
+		return NETDEV_TX_BUSY;
+
+	txring = mac->tx;
+
+	spin_lock_irqsave(&txring->lock, flags);
+
+	if (txring->next_to_use - txring->next_to_clean == TX_RING_SIZE) {
+		spin_unlock_irqrestore(&txring->lock, flags);
+		pasemi_mac_clean_tx(mac);
+		spin_lock_irqsave(&txring->lock, flags);
+
+		if (txring->next_to_use - txring->next_to_clean ==
+		    TX_RING_SIZE) {
+			/* Still no room -- stop the queue and wait for tx
+			 * intr when there's room.
+			 */
+			netif_stop_queue(dev);
+			goto out_err;
+		}
+	}
+
+	dp = &TX_DESC(mac, txring->next_to_use);
+	info = &TX_DESC_INFO(mac, txring->next_to_use);
+
+	dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
+	dp->ptr   = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
+	info->dma = map;
+	info->skb = skb;
+
+	txring->next_to_use++;
+	mac->stats.tx_packets++;
+	mac->stats.tx_bytes += skb->len;
+
+	spin_unlock_irqrestore(&txring->lock, flags);
+
+	pci_write_config_dword(mac->dma_pdev,
+			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+
+	return NETDEV_TX_OK;
+
+out_err:
+	spin_unlock_irqrestore(&txring->lock, flags);
+	pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
+	return NETDEV_TX_BUSY;
+}
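+
+/* Worked example for the checksum setup in pasemi_mac_start_tx() above
+ * (illustrative numbers): for an untagged TCP/IPv4 frame, skb->nh.raw is
+ * 14 bytes (the Ethernet header) past skb->data and skb->h.raw sits one
+ * 20-byte IP header further on, so XCT_MACTX_IPO() is given 14 and
+ * XCT_MACTX_IPH() is given 20 >> 2 = 5, the header length in 32-bit words.
+ */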
+
+static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	return &mac->stats;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+	struct pasemi_mac *mac = netdev_priv(dev);
+	unsigned int flags;
+
+	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
+
+	/* Set promiscuous */
+	if (dev->flags & IFF_PROMISC)
+		flags |= PAS_MAC_CFG_PCFG_PR;
+	else
+		flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
+}
+
+static int pasemi_mac_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct pasemi_mac *mac = netdev_priv(dev);
+
+	pkts = pasemi_mac_clean_rx(mac, limit);
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
+				       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int index = 0;
+	struct net_device *dev;
+	struct pasemi_mac *mac;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	dev = alloc_etherdev(sizeof(struct pasemi_mac));
+	if (dev == NULL) {
+		dev_err(&pdev->dev,
+			"pasemi_mac: Could not allocate ethernet device.\n");
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	SET_MODULE_OWNER(dev);
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	mac = netdev_priv(dev);
+
+	mac->pdev = pdev;
+	mac->netdev = dev;
+	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
+
+	if (!mac->dma_pdev) {
+		dev_err(&pdev->dev, "Can't find DMA Controller\n");
+		err = -ENODEV;
+		goto out_free_netdev;
+	}
+
+	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+
+	if (!mac->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		err = -ENODEV;
+		goto out_put_dma_pdev;
+	}
+
+	/* These should come out of the device tree eventually */
+	mac->dma_txch = index;
+	mac->dma_rxch = index;
+
+	/* We probe GMAC before XAUI, but the DMA interfaces are
+	 * in XAUI, GMAC order.
+	 */
+	if (index < 4)
+		mac->dma_if = index + 2;
+	else
+		mac->dma_if = index - 4;
+	index++;
+
+	switch (pdev->device) {
+	case 0xa005:
+		mac->type = MAC_TYPE_GMAC;
+		break;
+	case 0xa006:
+		mac->type = MAC_TYPE_XAUI;
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* get mac addr from device tree */
+	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
+		err = -ENODEV;
+		goto out;
+	}
+	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+	dev->open = pasemi_mac_open;
+	dev->stop = pasemi_mac_close;
+	dev->hard_start_xmit = pasemi_mac_start_tx;
+	dev->get_stats = pasemi_mac_get_stats;
+	dev->set_multicast_list = pasemi_mac_set_rx_mode;
+	dev->weight = 64;
+	dev->poll = pasemi_mac_poll;
+	dev->features = NETIF_F_HW_CSUM;
+
+	/* The dma status structure is located in the I/O bridge, and
+	 * is cache coherent.
+	 */
+	if (!dma_status)
+		/* XXXOJN This should come from the device tree */
+		dma_status = __ioremap(0xfd800000, 0x1000, 0);
+
+	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
+	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
+
+	err = register_netdev(dev);
+
+	if (err) {
+		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
+			err);
+		goto out;
+	} else
+		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
+		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return err;
+
+out:
+	pci_dev_put(mac->iob_pdev);
+out_put_dma_pdev:
+	pci_dev_put(mac->dma_pdev);
+out_free_netdev:
+	free_netdev(dev);
+out_disable_device:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct pasemi_mac *mac;
+
+	if (!netdev)
+		return;
+
+	mac = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	pci_disable_device(pdev);
+	pci_dev_put(mac->dma_pdev);
+	pci_dev_put(mac->iob_pdev);
+
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+}
+
+static struct pci_device_id pasemi_mac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+	.name		= "pasemi_mac",
+	.id_table	= pasemi_mac_pci_tbl,
+	.probe		= pasemi_mac_probe,
+	.remove		= __devexit_p(pasemi_mac_remove),
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_mac_driver);
+	if (dma_status)
+		__iounmap(dma_status);
+	dma_status = NULL;
+}
+
+static int __init pasemi_mac_init_module(void)
+{
+	return pci_register_driver(&pasemi_mac_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
+
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);

+ 460 - 0
drivers/net/pasemi_mac.h

@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+struct pasemi_mac_txring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;
+	dma_addr_t	 dma;
+	unsigned int	 size;
+	unsigned int	 next_to_use;
+	unsigned int	 next_to_clean;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d tx" */
+};
+
+struct pasemi_mac_rxring {
+	spinlock_t	 lock;
+	struct pas_dma_xct_descr	*desc;	/* RX channel descriptor ring */
+	dma_addr_t	 dma;
+	u64		*buffers;	/* RX interface buffer ring */
+	dma_addr_t	 buf_dma;
+	unsigned int	 size;
+	unsigned int	 next_to_fill;
+	unsigned int	 next_to_clean;
+	struct pasemi_mac_buffer *desc_info;
+	char		 irq_name[10];  /* "eth%d rx" */
+};
+
+struct pasemi_mac {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct pci_dev *dma_pdev;
+	struct pci_dev *iob_pdev;
+	struct net_device_stats stats;
+
+	/* Pointer to the cacheable per-channel status registers */
+	u64	*rx_status;
+	u64	*tx_status;
+
+	u8		type;
+#define MAC_TYPE_GMAC	1
+#define MAC_TYPE_XAUI	2
+	u32	dma_txch;
+	u32	dma_if;
+	u32	dma_rxch;
+
+	u8		mac_addr[6];
+
+	struct timer_list	rxtimer;
+
+	struct pasemi_mac_txring *tx;
+	struct pasemi_mac_rxring *rx;
+};
+
+/* Software status descriptor (desc_info) */
+struct pasemi_mac_buffer {
+	struct sk_buff *skb;
+	dma_addr_t	dma;
+};
+
+
+/* status register layout in IOB region, at 0xfd800000 */
+struct pasdma_status {
+	u64 rx_sta[64];
+	u64 tx_sta[20];
+};
+
+/* descriptor structure */
+struct pas_dma_xct_descr {
+	union {
+		u64	mactx;
+		u64	macrx;
+	};
+	union {
+		u64	ptr;
+		u64	rxb;
+	};
+};
+
+/* MAC CFG register offsets */
+
+enum {
+	PAS_MAC_CFG_PCFG = 0x80,
+	PAS_MAC_CFG_TXP = 0x98,
+	PAS_MAC_IPC_CHNL = 0x208,
+};
+
+/* MAC CFG register fields */
+#define PAS_MAC_CFG_PCFG_PE		0x80000000
+#define PAS_MAC_CFG_PCFG_CE		0x40000000
+#define PAS_MAC_CFG_PCFG_BU		0x20000000
+#define PAS_MAC_CFG_PCFG_TT		0x10000000
+#define PAS_MAC_CFG_PCFG_TSR_M		0x0c000000
+#define PAS_MAC_CFG_PCFG_TSR_10M	0x00000000
+#define PAS_MAC_CFG_PCFG_TSR_100M	0x04000000
+#define PAS_MAC_CFG_PCFG_TSR_1G		0x08000000
+#define PAS_MAC_CFG_PCFG_TSR_10G	0x0c000000
+#define PAS_MAC_CFG_PCFG_T24		0x02000000
+#define PAS_MAC_CFG_PCFG_PR		0x01000000
+#define PAS_MAC_CFG_PCFG_CRO_M		0x00ff0000
+#define PAS_MAC_CFG_PCFG_CRO_S	16
+#define PAS_MAC_CFG_PCFG_IPO_M		0x0000ff00
+#define PAS_MAC_CFG_PCFG_IPO_S	8
+#define PAS_MAC_CFG_PCFG_S1		0x00000080
+#define PAS_MAC_CFG_PCFG_IO_M		0x00000060
+#define PAS_MAC_CFG_PCFG_IO_MAC		0x00000000
+#define PAS_MAC_CFG_PCFG_IO_OFF		0x00000020
+#define PAS_MAC_CFG_PCFG_IO_IND_ETH	0x00000040
+#define PAS_MAC_CFG_PCFG_IO_IND_IP	0x00000060
+#define PAS_MAC_CFG_PCFG_LP		0x00000010
+#define PAS_MAC_CFG_PCFG_TS		0x00000008
+#define PAS_MAC_CFG_PCFG_HD		0x00000004
+#define PAS_MAC_CFG_PCFG_SPD_M		0x00000003
+#define PAS_MAC_CFG_PCFG_SPD_10M	0x00000000
+#define PAS_MAC_CFG_PCFG_SPD_100M	0x00000001
+#define PAS_MAC_CFG_PCFG_SPD_1G		0x00000002
+#define PAS_MAC_CFG_PCFG_SPD_10G	0x00000003
+#define PAS_MAC_CFG_TXP_FCF		0x01000000
+#define PAS_MAC_CFG_TXP_FCE		0x00800000
+#define PAS_MAC_CFG_TXP_FC		0x00400000
+#define PAS_MAC_CFG_TXP_FPC_M		0x00300000
+#define PAS_MAC_CFG_TXP_FPC_S		20
+#define PAS_MAC_CFG_TXP_FPC(x)		(((x) << PAS_MAC_CFG_TXP_FPC_S) & \
+					 PAS_MAC_CFG_TXP_FPC_M)
+#define PAS_MAC_CFG_TXP_RT		0x00080000
+#define PAS_MAC_CFG_TXP_BL		0x00040000
+#define PAS_MAC_CFG_TXP_SL_M		0x00030000
+#define PAS_MAC_CFG_TXP_SL_S		16
+#define PAS_MAC_CFG_TXP_SL(x)		(((x) << PAS_MAC_CFG_TXP_SL_S) & \
+					 PAS_MAC_CFG_TXP_SL_M)
+#define PAS_MAC_CFG_TXP_COB_M		0x0000f000
+#define PAS_MAC_CFG_TXP_COB_S		12
+#define PAS_MAC_CFG_TXP_COB(x)		(((x) << PAS_MAC_CFG_TXP_COB_S) & \
+					 PAS_MAC_CFG_TXP_COB_M)
+#define PAS_MAC_CFG_TXP_TIFT_M		0x00000f00
+#define PAS_MAC_CFG_TXP_TIFT_S		8
+#define PAS_MAC_CFG_TXP_TIFT(x)		(((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
+					 PAS_MAC_CFG_TXP_TIFT_M)
+#define PAS_MAC_CFG_TXP_TIFG_M		0x000000ff
+#define PAS_MAC_CFG_TXP_TIFG_S		0
+#define PAS_MAC_CFG_TXP_TIFG(x)		(((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
+					 PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_IPC_CHNL_DCHNO_M	0x003f0000
+#define PAS_MAC_IPC_CHNL_DCHNO_S	16
+#define PAS_MAC_IPC_CHNL_DCHNO(x)	(((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+					 PAS_MAC_IPC_CHNL_DCHNO_M)
+#define PAS_MAC_IPC_CHNL_BCH_M		0x0000003f
+#define PAS_MAC_IPC_CHNL_BCH_S		0
+#define PAS_MAC_IPC_CHNL_BCH(x)		(((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+					 PAS_MAC_IPC_CHNL_BCH_M)
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+enum {
+	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
+	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
+	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
+	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
+};
+#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
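+
+/* Usage example (as in pasemi_mac_open()): the common receive and transmit
+ * sections are switched on with
+ *	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
+ *			       PAS_DMA_COM_RXCMD_EN);
+ * and the matching PAS_DMA_COM_TXCMD write.
+ */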
+
+
+/* Per-interface and per-channel registers */
+#define _PAS_DMA_RXINT_STRIDE		0x20
+#define PAS_DMA_RXINT_RCMDSTA(i)	(0x200+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_RCMDSTA_EN	0x00000001
+#define    PAS_DMA_RXINT_RCMDSTA_ST	0x00000002
+#define    PAS_DMA_RXINT_RCMDSTA_OO	0x00000100
+#define    PAS_DMA_RXINT_RCMDSTA_BP	0x00000200
+#define    PAS_DMA_RXINT_RCMDSTA_DR	0x00000400
+#define    PAS_DMA_RXINT_RCMDSTA_BT	0x00000800
+#define    PAS_DMA_RXINT_RCMDSTA_TB	0x00001000
+#define    PAS_DMA_RXINT_RCMDSTA_ACT	0x00010000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_M	0xfffe0000
+#define    PAS_DMA_RXINT_RCMDSTA_DROPS_S	17
+#define PAS_DMA_RXINT_INCR(i)		(0x210+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_INCR_INCR_M	0x0000ffff
+#define    PAS_DMA_RXINT_INCR_INCR_S	0
+#define    PAS_DMA_RXINT_INCR_INCR(x)	((x) & 0x0000ffff)
+#define PAS_DMA_RXINT_BASEL(i)		(0x218+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEL_BRBL(x)	((x) & ~0x3f)
+#define PAS_DMA_RXINT_BASEU(i)		(0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
+#define    PAS_DMA_RXINT_BASEU_BRBH(x)	((x) & 0xfff)
+#define    PAS_DMA_RXINT_BASEU_SIZ_M	0x3fff0000	/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXINT_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXINT_BASEU_SIZ(x)	(((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
+					 PAS_DMA_RXINT_BASEU_SIZ_M)
+
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
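+
+/* Sizing example: pasemi_mac_setup_tx_resources() passes TX_RING_SIZE >> 2
+ * to PAS_DMA_TXCHAN_BASEU_SIZ(), which matches four 16-byte descriptors
+ * per line assuming 64-byte cache lines (an inference from the shift, not
+ * a documented constant).
+ */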
+
+#define _PAS_DMA_RXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_RXCHAN_CCMDSTA	0x800	/* Command / Status		*/
+#define _PAS_DMA_RXCHAN_CFG	0x804	/* Configuration		*/
+#define _PAS_DMA_RXCHAN_INCR	0x810	/* Descriptor increment		*/
+#define _PAS_DMA_RXCHAN_CNT	0x814	/* Descriptor count/offset	*/
+#define _PAS_DMA_RXCHAN_BASEL	0x818	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_RXCHAN_BASEU	0x81c	/*			(high)	*/
+#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_RXCHAN_CCMDSTA_ACT	0x00010000	/* Active */
+#define    PAS_DMA_RXCHAN_CCMDSTA_DU	0x00020000
+#define PAS_DMA_RXCHAN_CFG(c)     (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_CFG_HBU_M	0x00000380
+#define    PAS_DMA_RXCHAN_CFG_HBU_S	7
+#define    PAS_DMA_RXCHAN_CFG_HBU(x)	(((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
+					 PAS_DMA_RXCHAN_CFG_HBU_M)
+#define PAS_DMA_RXCHAN_INCR(c)    (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define PAS_DMA_RXCHAN_BASEL(c)   (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_RXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_RXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_RXCHAN_BASEU(c)   (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_RXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_RXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_RXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_RXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_RXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_RXCHAN_BASEU_SIZ_M)
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_TIMER		0x1000000000000000ull
+#define    PAS_STATUS_ERROR		0x2000000000000000ull
+#define    PAS_STATUS_SOFT		0x4000000000000000ull
+#define    PAS_STATUS_INT		0x8000000000000000ull
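+
+/* Example of picking a field out of a channel status word with the masks
+ * above (illustrative sketch, not part of the driver):
+ */
+#if 0	/* illustration only */
+static inline unsigned int pas_status_pcnt(u64 status)
+{
+	return (status & PAS_STATUS_PCNT_M) >> PAS_STATUS_PCNT_S;
+}
+#endif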
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define	XCT_MACTX_T		0x8000000000000000ull
+#define	XCT_MACTX_ST		0x4000000000000000ull
+#define XCT_MACTX_NORES		0x0000000000000000ull
+#define XCT_MACTX_8BRES		0x1000000000000000ull
+#define XCT_MACTX_24BRES	0x2000000000000000ull
+#define XCT_MACTX_40BRES	0x3000000000000000ull
+#define XCT_MACTX_I		0x0800000000000000ull
+#define XCT_MACTX_O		0x0400000000000000ull
+#define XCT_MACTX_E		0x0200000000000000ull
+#define XCT_MACTX_VLAN_M	0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
+#define XCT_MACTX_CRC_M		0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
+#define XCT_MACTX_SS		0x0010000000000000ull
+#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
+				 XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000ull
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
+				 XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000ull
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
+				 XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
+#define XCT_MACTX_V6		0x0000000000000010ull
+#define XCT_MACTX_C		0x0000000000000004ull
+#define XCT_MACTX_AL2		0x0000000000000002ull
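+
+/* Composition example: for a 60-byte TCP/IPv4 frame with checksum offload,
+ * pasemi_mac_start_tx() builds the first descriptor word as
+ *	XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD |
+ *	XCT_MACTX_CSUM_TCP | XCT_MACTX_IPH(5) | XCT_MACTX_IPO(14) |
+ *	XCT_MACTX_LLEN(60)
+ * (illustrative values; the flags track the code in pasemi_mac.c).
+ */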
+
+/* Receive descriptor fields */
+#define	XCT_MACRX_T		0x8000000000000000ull
+#define	XCT_MACRX_ST		0x4000000000000000ull
+#define XCT_MACRX_NORES		0x0000000000000000ull
+#define XCT_MACRX_8BRES		0x1000000000000000ull
+#define XCT_MACRX_24BRES	0x2000000000000000ull
+#define XCT_MACRX_40BRES	0x3000000000000000ull
+#define XCT_MACRX_O		0x0400000000000000ull
+#define XCT_MACRX_E		0x0200000000000000ull
+#define XCT_MACRX_FF		0x0100000000000000ull
+#define XCT_MACRX_PF		0x0080000000000000ull
+#define XCT_MACRX_OB		0x0040000000000000ull
+#define XCT_MACRX_OD		0x0020000000000000ull
+#define XCT_MACRX_FS		0x0010000000000000ull
+#define XCT_MACRX_NB_M		0x000fc00000000000ull
+#define XCT_MACRX_NB_S		46ULL
+#define XCT_MACRX_NB(x)		((((long)(x)) << XCT_MACRX_NB_S) & \
+				 XCT_MACRX_NB_M)
+#define XCT_MACRX_LLEN_M	0x00003fff00000000ull
+#define XCT_MACRX_LLEN_S	32ULL
+#define XCT_MACRX_LLEN(x)	((((long)(x)) << XCT_MACRX_LLEN_S) & \
+				 XCT_MACRX_LLEN_M)
+#define XCT_MACRX_CRC		0x0000000080000000ull
+#define XCT_MACRX_LEN_M		0x0000000060000000ull
+#define XCT_MACRX_LEN_TOOSHORT	0x0000000020000000ull
+#define XCT_MACRX_LEN_BELOWMIN	0x0000000040000000ull
+#define XCT_MACRX_LEN_TRUNC	0x0000000060000000ull
+#define XCT_MACRX_CAST_M	0x0000000018000000ull
+#define XCT_MACRX_CAST_UNI	0x0000000000000000ull
+#define XCT_MACRX_CAST_MULTI	0x0000000008000000ull
+#define XCT_MACRX_CAST_BROAD	0x0000000010000000ull
+#define XCT_MACRX_CAST_PAUSE	0x0000000018000000ull
+#define XCT_MACRX_VLC_M		0x0000000006000000ull
+#define XCT_MACRX_FM		0x0000000001000000ull
+#define XCT_MACRX_HTY_M		0x0000000000c00000ull
+#define XCT_MACRX_HTY_IPV4_OK	0x0000000000000000ull
+#define XCT_MACRX_HTY_IPV6 	0x0000000000400000ull
+#define XCT_MACRX_HTY_IPV4_BAD	0x0000000000800000ull
+#define XCT_MACRX_HTY_NONIP	0x0000000000c00000ull
+#define XCT_MACRX_IPP_M		0x00000000003f0000ull
+#define XCT_MACRX_IPP_S		16
+#define XCT_MACRX_CSUM_M	0x000000000000ffffull
+#define XCT_MACRX_CSUM_S	0
+
+#define XCT_PTR_T		0x8000000000000000ull
+#define XCT_PTR_LEN_M		0x7ffff00000000000ull
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
+				 XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffffull
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
+				 XCT_PTR_ADDR_M)
+
+/* Receive interface buffer fields */
+#define XCT_RXB_LEN_M		0x0ffff00000000000ull
+#define XCT_RXB_LEN_S		44
+#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_RXB_LEN_S) & XCT_RXB_LEN_M)
+#define XCT_RXB_ADDR_M		0x00000fffffffffffull
+#define XCT_RXB_ADDR_S		0
+#define XCT_RXB_ADDR(x)		((((long)(x)) << XCT_RXB_ADDR_S) & XCT_RXB_ADDR_M)
+
+
+#endif /* PASEMI_MAC_H */

+ 301 - 62
drivers/net/qla3xxx.c

@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
 
 static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
 	/* required last entry */
 	{0,}
 };
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
 			 2) << 7))
 		return -1;
 
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ql_write_page0_reg(qdev,
+			&port_regs->macMIIMgmtControlReg, 0x0f00000);
+
 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
 	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
 
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 				   struct ob_mac_iocb_rsp *mac_rsp)
 {
 	struct ql_tx_buf_cb *tx_cb;
+	int i;
 
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(tx_cb, mapaddr),
-			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
-	dev_kfree_skb_irq(tx_cb->skb);
+			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+			 pci_unmap_len(&tx_cb->map[0], maplen),
+			 PCI_DMA_TODEVICE);
+	if (tx_cb->seg_count > 1) {
+		/* unmap every remaining segment, including the last one */
+		for (i = 1; i < tx_cb->seg_count; i++) {
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(&tx_cb->map[i],
+						      mapaddr),
+				       pci_unmap_len(&tx_cb->map[i], maplen),
+				       PCI_DMA_TODEVICE);
+		}
+	}
 	qdev->stats.tx_packets++;
 	qdev->stats.tx_bytes += tx_cb->skb->len;
+	dev_kfree_skb_irq(tx_cb->skb);
 	tx_cb->skb = NULL;
 	atomic_inc(&qdev->tx_count);
 }
 
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion.  The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data.  For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two.  Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process.  3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	curr_ial_ptr++;		/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer (3022 only) */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
+			qdev->lrg_buf_index = 0;
+		}
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
 	u32 *curr_ial_ptr;
-	struct sk_buff *skb1, *skb2;
+	struct sk_buff *skb1 = NULL, *skb2;
 	struct net_device *ndev = qdev->ndev;
 	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
 	u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	skb1 = lrg_buf_cb1->skb;
-	curr_ial_ptr++;		/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer on 3022 */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+			qdev->lrg_buf_index = 0;
+		skb1 = lrg_buf_cb1->skb;
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+		size = ETH_HLEN;
+		if (*((u16 *) skb1->data) != 0xFFFF)
+			size += VLAN_ETH_HLEN - ETH_HLEN;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
 		qdev->lrg_buf_index = 0;
 
-	qdev->stats.rx_packets++;
-	qdev->stats.rx_bytes += length;
-
-	/*
-	 * Copy the ethhdr from first buffer to second. This
-	 * is necessary for IP completions.
-	 */
-	if (*((u16 *) skb1->data) != 0xFFFF)
-		size = VLAN_ETH_HLEN;
-	else
-		size = ETH_HLEN;
-
 	skb_put(skb2, length);	/* Just the second buffer length here. */
 	pci_unmap_single(qdev->pdev,
 			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb2->data);
 
-	memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
-	skb2->dev = qdev->ndev;
 	skb2->ip_summed = CHECKSUM_NONE;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/*
+		 * Copy the ethhdr from first buffer to second. This
+		 * is necessary for 3022 IP completions.
+		 */
+		memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+	} else {
+		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+		if (checksum &
+			(IB_IP_IOCB_RSP_3032_ICE |
+			 IB_IP_IOCB_RSP_3032_CE |
+			 IB_IP_IOCB_RSP_3032_NUC)) {
+			printk(KERN_ERR
+			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
+			       __func__,
+			       ((checksum &
+				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
+				"UDP"),checksum);
+		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
+			skb2->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+	}
+	skb2->dev = qdev->ndev;
 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
 	netif_receive_skb(skb2);
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += length;
 	ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 			break;
 
 		case OPCODE_IB_MAC_IOCB:
+		case OPCODE_IB_3032_MAC_IOCB:
 			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
 					       net_rsp);
 			(*rx_cleaned)++;
 			break;
 
 		case OPCODE_IB_IP_IOCB:
+		case OPCODE_IB_3032_IP_IOCB:
 			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
 						 net_rsp);
 			(*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 	return IRQ_RETVAL(handled);
 }
 
+/*
+ * Get the total number of segments needed for the
+ * given number of fragments.  This is necessary because
+ * outbound address lists (OAL) will be used when more than
+ * two frags are given.  Each address list has 5 addr/len
+ * pairs.  The 5th pair in each OAL is used to point to
+ * the next OAL if more frags are coming.
+ * That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(unsigned short frags)
+{
+	switch(frags) {
+	case 0:	return 1;	/* just the skb->data seg */
+	case 1:	return 2;	/* skb->data + 1 frag */
+	case 2:	return 3;	/* skb->data + 2 frags */
+	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
+	case 4:	return 6;
+	case 5:	return 7;
+	case 6:	return 8;
+	case 7:	return 10;
+	case 8:	return 11;
+	case 9:	return 12;
+	case 10: return 13;
+	case 11: return 15;
+	case 12: return 16;
+	case 13: return 17;
+	case 14: return 18;
+	case 15: return 20;
+	case 16: return 21;
+	case 17: return 22;
+	case 18: return 23;
+	}
+	return -1;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+	struct ethhdr *eth;
+	struct iphdr *ip = NULL;
+	u8 offset = ETH_HLEN;
+
+	eth = (struct ethhdr *)(skb->data);
+
+	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[ETH_HLEN];
+	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
+		   ((struct vlan_ethhdr *)skb->data)->
+		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
+		offset = VLAN_ETH_HLEN;
+	}
+
+	if (ip) {
+		if (ip->protocol == IPPROTO_TCP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		} else if (ip->protocol == IPPROTO_UDP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		}
+	}
+}
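+
+/* For an untagged TCP/IPv4 frame this yields ip_hdr_off = 14 (ETH_HLEN)
+ * and ip_hdr_len = 5 (a 20-byte header counted in 32-bit words); with an
+ * 802.1Q tag the offset becomes 18 (VLAN_ETH_HLEN).  Illustrative values.
+ */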
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	struct ql_tx_buf_cb *tx_cb;
+	u32 tot_len = skb->len;
+	struct oal *oal;
+	struct oal_entry *oal_entry;
+	int len;
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 	u64 map;
+	int seg_cnt, seg = 0;
+	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
 	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
 		if (!netif_queue_stopped(ndev))
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+	seg_cnt = tx_cb->seg_count = ql_get_seg_count(skb_shinfo(skb)->nr_frags);
+	if (seg_cnt == -1) {
+		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
+		return NETDEV_TX_OK;
+	}
 	mac_iocb_ptr = tx_cb->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-	mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
 	tx_cb->skb = skb;
-	map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-	mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
-	mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
-	mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-	pci_unmap_addr_set(tx_cb, mapaddr, map);
-	pci_unmap_len_set(tx_cb, maplen, skb->len);
-	atomic_dec(&qdev->tx_count);
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		ql_hw_csum_setup(skb, mac_iocb_ptr);
+	len = skb_headlen(skb);
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+	oal_entry->len = cpu_to_le32(len);
+	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+	seg++;
+
+	if (!skb_shinfo(skb)->nr_frags) {
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	} else {
+		int i;
+		oal = tx_cb->oal;
+		for (i=0; i<frag_cnt; i++,seg++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			oal_entry++;
+			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
+			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
+			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
+			    (seg == 17 && seg_cnt > 18)) {
+				/* Continuation entry points to outbound address list. */
+				map = pci_map_single(qdev->pdev, oal,
+						     sizeof(struct oal),
+						     PCI_DMA_TODEVICE);
+				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+				oal_entry->len =
+				    cpu_to_le32(sizeof(struct oal) |
+						OAL_CONT_ENTRY);
+				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+						   map);
+				pci_unmap_len_set(&tx_cb->map[seg], maplen,
+						  sizeof(struct oal));
+				oal_entry = (struct oal_entry *)oal;
+				oal++;
+				seg++;
+			}
 
+			map =
+			    pci_map_page(qdev->pdev, frag->page,
+					 frag->page_offset, frag->size,
+					 PCI_DMA_TODEVICE);
+			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+			oal_entry->len = cpu_to_le32(frag->size);
+			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+			pci_unmap_len_set(&tx_cb->map[seg], maplen,
+					  frag->size);
+		}
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	}
+	wmb();
 	qdev->req_producer_index++;
 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
 		qdev->req_producer_index = 0;
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
 		       ndev->name, qdev->req_producer_index, skb->len);
 
+	atomic_dec(&qdev->tx_count);
 	return NETDEV_TX_OK;
 }
+
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
 	qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 	return 0;
 }
 
-static void ql_create_send_free_list(struct ql3_adapter *qdev)
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+	struct ql_tx_buf_cb *tx_cb;
+	int i;
+
+	tx_cb = &qdev->tx_buf[0];
+	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		if (tx_cb->oal) {
+			kfree(tx_cb->oal);
+			tx_cb->oal = NULL;
+		}
+		tx_cb++;
+	}
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
 
 	/* Create free list of transmit buffers */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
 		tx_cb = &qdev->tx_buf[i];
 		tx_cb->skb = NULL;
 		tx_cb->queue_entry = req_q_curr;
 		req_q_curr++;
+		tx_cb->oal = kmalloc(512, GFP_KERNEL);
+		if (tx_cb->oal == NULL)
+			return -1;
 	}
+	return 0;
 }
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 
 	/* Initialize the large buffer queue. */
 	ql_init_large_buffers(qdev);
-	ql_create_send_free_list(qdev);
+	if (ql_create_send_free_list(qdev))
+		goto err_free_list;
 
 	qdev->rsp_current = qdev->rsp_q_virt_addr;
 
 	return 0;
-
+err_free_list:
+	ql_free_send_free_list(qdev);
 err_small_buffers:
 	ql_free_buffer_queues(qdev);
 err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
 
 static void ql_free_mem_resources(struct ql3_adapter *qdev)
 {
+	ql_free_send_free_list(qdev);
 	ql_free_large_buffers(qdev);
 	ql_free_small_buffers(qdev);
 	ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 	}
 
 	/* Enable Ethernet Function */
-	value =
-	    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
-	     PORT_CONTROL_HH);
-	ql_write_page0_reg(qdev, &port_regs->portControl,
-			   ((value << 16) | value));
+	if (qdev->device_id == QL3032_DEVICE_ID) {
+		value =
+		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
+		ql_write_page0_reg(qdev, &port_regs->functionControl,
+				   ((value << 16) | value));
+	} else {
+		value =
+		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+		     PORT_CONTROL_HH);
+		ql_write_page0_reg(qdev, &port_regs->portControl,
+				   ((value << 16) | value));
+	}
+
 
 out:
 	return status;
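
Both branches above fold the bit values into ((value << 16) | value): on these parts the upper halfword of portControl/functionControl appears to act as a per-bit write-enable mask, so a single write asserts the enables and the data together. A minimal sketch of the idiom, under that assumption (ql_set_port_bits is a hypothetical helper, not in the driver):

/* Hypothetical: set `bits` in a mask-protected QLogic port register.
 * Assumes upper 16 bits = write-enable mask, lower 16 bits = data,
 * so bits absent from `bits` are left untouched by the hardware. */
static void ql_set_port_bits(struct ql3_adapter *qdev, u32 __iomem *reg,
			     u16 bits)
{
	ql_write_page0_reg(qdev, reg, ((u32)bits << 16) | bits);
}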
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 	struct pci_dev *pdev = qdev->pdev;
 
 	printk(KERN_INFO PFX
-	       "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
-	       DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
+	       DRV_NAME, qdev->index, qdev->chip_rev_id,
+	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
+	       qdev->pci_slot);
 	printk(KERN_INFO PFX
 	       "%s Interface.\n",
 	       test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
 		 * Loop through the active list and return the skb.
 		 */
 		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+			int j;
 			tx_cb = &qdev->tx_buf[i];
 			if (tx_cb->skb) {
-
 				printk(KERN_DEBUG PFX
 				       "%s: Freeing lost SKB.\n",
 				       qdev->ndev->name);
 				pci_unmap_single(qdev->pdev,
-					pci_unmap_addr(tx_cb, mapaddr),
-					pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+					 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+					 pci_unmap_len(&tx_cb->map[0], maplen),
+					 PCI_DMA_TODEVICE);
+				for(j=1;j<tx_cb->seg_count;j++) {
+					pci_unmap_page(qdev->pdev,
+					       pci_unmap_addr(&tx_cb->map[j],mapaddr),
+					       pci_unmap_len(&tx_cb->map[j],maplen),
+					       PCI_DMA_TODEVICE);
+				}
 				dev_kfree_skb(tx_cb->skb);
 				tx_cb->skb = NULL;
 			}
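
The unmap sequence above mirrors the mapping order in ql3xxx_send(): map[0] covers the linear skb area (pci_map_single), map[1..seg_count-1] cover page fragments (pci_map_page). A sketch of that bookkeeping factored into one place (ql_unmap_send is hypothetical, not a function of the driver):

/* Hypothetical consolidation of the teardown above, assuming the same
 * map[]/seg_count layout as ql3xxx_send(): entry 0 is the linear data
 * area, entries 1..seg_count-1 are fragment pages. */
static void ql_unmap_send(struct ql3_adapter *qdev, struct ql_tx_buf_cb *tx_cb)
{
	int j;

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	for (j = 1; j < tx_cb->seg_count; j++)
		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[j], mapaddr),
			       pci_unmap_len(&tx_cb->map[j], maplen),
			       PCI_DMA_TODEVICE);
}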
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	if (pci_using_dac)
-		ndev->features |= NETIF_F_HIGHDMA;
-
 	pci_set_drvdata(pdev, ndev);
 
 	qdev = netdev_priv(ndev);
 	qdev->index = cards_found;
 	qdev->ndev = ndev;
 	qdev->pdev = pdev;
+	qdev->device_id = pci_entry->device;
 	qdev->port_link_state = LS_DOWN;
 	if (msi)
 		qdev->msi = 1;
 
 	qdev->msg_enable = netif_msg_init(debug, default_msg);
 
+	if (pci_using_dac)
+		ndev->features |= NETIF_F_HIGHDMA;
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
+
 	qdev->mem_map_registers =
 	    ioremap_nocache(pci_resource_start(pdev, 1),
 			    pci_resource_len(qdev->pdev, 1));

+ 78 - 10
drivers/net/qla3xxx.h

@@ -21,7 +21,9 @@
 
 #define OPCODE_UPDATE_NCB_IOCB      0xF0
 #define OPCODE_IB_MAC_IOCB          0xF9
+#define OPCODE_IB_3032_MAC_IOCB     0x09
 #define OPCODE_IB_IP_IOCB           0xFA
+#define OPCODE_IB_3032_IP_IOCB      0x0A
 #define OPCODE_IB_TCP_IOCB          0xFB
 #define OPCODE_DUMP_PROTO_IOCB      0xFE
 #define OPCODE_BUFFER_ALERT_IOCB    0xFB
@@ -37,18 +39,23 @@
 struct ob_mac_iocb_req {
 	u8 opcode;
 	u8 flags;
-#define OB_MAC_IOCB_REQ_MA  0xC0
-#define OB_MAC_IOCB_REQ_F   0x20
-#define OB_MAC_IOCB_REQ_X   0x10
+#define OB_MAC_IOCB_REQ_MA  0xe0
+#define OB_MAC_IOCB_REQ_F   0x10
+#define OB_MAC_IOCB_REQ_X   0x08
 #define OB_MAC_IOCB_REQ_D   0x02
 #define OB_MAC_IOCB_REQ_I   0x01
-	__le16 reserved0;
+	u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC	0x04
+#define OB_3032MAC_IOCB_REQ_TC	0x02
+#define OB_3032MAC_IOCB_REQ_UC	0x01
+	u8 reserved0;
 
 	__le32 transaction_id;
 	__le16 data_len;
-	__le16 reserved1;
+	u8 ip_hdr_off;
+	u8 ip_hdr_len;
+	__le32 reserved1;
 	__le32 reserved2;
-	__le32 reserved3;
 	__le32 buf_addr0_low;
 	__le32 buf_addr0_high;
 	__le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
 	__le32 buf_addr2_low;
 	__le32 buf_addr2_high;
 	__le32 buf_2_len;
+	__le32 reserved3;
 	__le32 reserved4;
-	__le32 reserved5;
 };
 /*
  * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
 	u8 opcode;
 	u8 flags;
 #define OB_MAC_IOCB_RSP_P   0x08
+#define OB_MAC_IOCB_RSP_L   0x04
 #define OB_MAC_IOCB_RSP_S   0x02
 #define OB_MAC_IOCB_RSP_I   0x01
 
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
 
 struct ib_mac_iocb_rsp {
 	u8 opcode;
+#define IB_MAC_IOCB_RSP_V   0x80
 	u8 flags;
 #define IB_MAC_IOCB_RSP_S   0x80
 #define IB_MAC_IOCB_RSP_H1  0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
 struct ob_ip_iocb_rsp {
 	u8 opcode;
 	u8 flags;
+#define OB_MAC_IOCB_RSP_H       0x10
 #define OB_MAC_IOCB_RSP_E       0x08
 #define OB_MAC_IOCB_RSP_L       0x04
 #define OB_MAC_IOCB_RSP_S       0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
 
 struct ib_ip_iocb_rsp {
 	u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V   0x80
+#define IB_IP_IOCB_RSP_3032_O   0x40
+#define IB_IP_IOCB_RSP_3032_I   0x20
+#define IB_IP_IOCB_RSP_3032_R   0x10
 	u8 flags;
 #define IB_IP_IOCB_RSP_S        0x80
 #define IB_IP_IOCB_RSP_H1       0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
 
 	__le16 length;
 	__le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE		0x01
+#define IB_IP_IOCB_RSP_3032_CE		0x02
+#define IB_IP_IOCB_RSP_3032_NUC		0x04
+#define IB_IP_IOCB_RSP_3032_UDP		0x08
+#define IB_IP_IOCB_RSP_3032_TCP		0x10
+#define IB_IP_IOCB_RSP_3032_IPE		0x20
 	__le16 reserved;
 #define IB_IP_IOCB_RSP_R        0x01
 	__le32 ial_low;
@@ -524,6 +544,21 @@ enum {
 	IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
 	IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
 	IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+	IP_ADDR_INDEX_REG_6 = 0x0008,
+	IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+	IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+	QL3032_PORT_CONTROL_DS = 0x0001,
+	QL3032_PORT_CONTROL_HH = 0x0002,
+	QL3032_PORT_CONTROL_EIv6 = 0x0004,
+	QL3032_PORT_CONTROL_EIv4 = 0x0008,
+	QL3032_PORT_CONTROL_ET = 0x0010,
+	QL3032_PORT_CONTROL_EF = 0x0020,
+	QL3032_PORT_CONTROL_DRM = 0x0040,
+	QL3032_PORT_CONTROL_RLB = 0x0080,
+	QL3032_PORT_CONTROL_RCB = 0x0100,
+	QL3032_PORT_CONTROL_KIE = 0x0200,
 };
 
 enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
 	u32 internalRamWDataReg;
 	u32 reclaimedBufferAddrRegLow;
 	u32 reclaimedBufferAddrRegHigh;
-	u32 reserved[2];
+	u32 tcpConfiguration;
+	u32 functionControl;
 	u32 fpgaRevID;
 	u32 localRamAddr;
 	u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
 
 #define QL3XXX_VENDOR_ID    0x1077
 #define QL3022_DEVICE_ID    0x3022
+#define QL3032_DEVICE_ID    0x3032
 
 /* MTU & Frame Size stuff */
 #define NORMAL_MTU_SIZE 		ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
 	int index;
 };
 
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
+
+struct oal_entry {
+	u32 dma_lo;
+	u32 dma_hi;
+	u32 len;
+#define OAL_LAST_ENTRY   0x80000000	/* Last valid buffer in list. */
+#define OAL_CONT_ENTRY   0x40000000	/* points to an OAL. (continuation) */
+	u32 reserved;
+};
+
+struct oal {
+	struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+	 DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	 DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
 struct ql_tx_buf_cb {
 	struct sk_buff *skb;
 	struct ob_mac_iocb_req *queue_entry ;
-	 DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	 DECLARE_PCI_UNMAP_LEN(maplen);
+	int seg_count;
+	struct oal *oal;
+	struct map_list map[MAX_SKB_FRAGS+1];
 };
 
 /* definitions for type field */
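
MAX_OAL_CNT above falls out of the layout in the comment: the first fragment travels in the IOCB itself and every further group of up to four fragments costs one chained OAL, so (MAX_SKB_FRAGS-1)/4 + 1 OALs always suffice. Assuming the usual MAX_SKB_FRAGS of 18 on 4 KiB pages, that is (18-1)/4 + 1 = 5 OALs, or 5 * sizeof(struct oal) = 400 bytes, which is why the 512-byte kmalloc() per tx_cb in ql_create_send_free_list() is enough. A hypothetical compile-time guard (not in the driver) would read:

/* Hypothetical guard: the 512-byte per-tx_cb scratch allocation must
 * hold the worst-case OAL chain; BUILD_BUG_ON() needs function scope. */
static inline void ql_oal_size_check(void)
{
	BUILD_BUG_ON(MAX_OAL_CNT * sizeof(struct oal) > 512);
}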
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
 	struct delayed_work reset_work;
 	struct delayed_work tx_timeout_work;
 	u32 max_frame_size;
+	u32 device_id;
 };
 
 #endif				/* _QLA3XXX_H_ */

+ 4 - 3
drivers/net/s2io-regs.h

@@ -15,7 +15,7 @@
 
 #define TBD 0
 
-typedef struct _XENA_dev_config {
+struct XENA_dev_config {
 /* Convention: mHAL_XXX is mask, vHAL_XXX is value */
 
 /* General Control-Status Registers */
@@ -300,6 +300,7 @@ typedef struct _XENA_dev_config {
 	u64 gpio_control;
 #define GPIO_CTRL_GPIO_0		BIT(8)
 	u64 misc_control;
+#define FAULT_BEHAVIOUR			BIT(0)
 #define EXT_REQ_EN			BIT(1)
 #define MISC_LINK_STABILITY_PRD(val)   vBIT(val,29,3)
 
@@ -851,9 +852,9 @@ typedef struct _XENA_dev_config {
 #define SPI_CONTROL_DONE		BIT(6)
 	u64 spi_data;
 #define SPI_DATA_WRITE(data,len)	vBIT(data,0,len)
-} XENA_dev_config_t;
+};
 
-#define XENA_REG_SPACE	sizeof(XENA_dev_config_t)
+#define XENA_REG_SPACE	sizeof(struct XENA_dev_config)
 #define	XENA_EEPROM_SPACE (0x01 << 11)
 
 #endif				/* _REGS_H */

File diff suppressed because it is too large
+ 200 - 277
drivers/net/s2io.c


+ 115 - 108
drivers/net/s2io.h

@@ -30,6 +30,8 @@
 #undef SUCCESS
 #define SUCCESS 0
 #define FAILURE -1
+#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
+#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
 
 #define CHECKBIT(value, nbit) (value & (1 << nbit))
 
@@ -37,7 +39,7 @@
 #define MAX_FLICKER_TIME	60000 /* 60 Secs */
 
 /* Maximum outstanding splits to be configured into xena. */
-typedef enum xena_max_outstanding_splits {
+enum {
 	XENA_ONE_SPLIT_TRANSACTION = 0,
 	XENA_TWO_SPLIT_TRANSACTION = 1,
 	XENA_THREE_SPLIT_TRANSACTION = 2,
@@ -46,7 +48,7 @@ typedef enum xena_max_outstanding_splits {
 	XENA_TWELVE_SPLIT_TRANSACTION = 5,
 	XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
 	XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
-} xena_max_outstanding_splits;
+};
 #define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
 
 /*  OS concerned variables and constants */
@@ -77,7 +79,7 @@ static int debug_level = ERR_DBG;
 #define S2IO_JUMBO_SIZE 9600
 
 /* Driver statistics maintained by driver */
-typedef struct {
+struct swStat {
 	unsigned long long single_ecc_errs;
 	unsigned long long double_ecc_errs;
 	unsigned long long parity_err_cnt;
@@ -92,10 +94,10 @@ typedef struct {
 	unsigned long long flush_max_pkts;
 	unsigned long long sum_avg_pkts_aggregated;
 	unsigned long long num_aggregations;
-} swStat_t;
+};
 
 /* Xpak releated alarm and warnings */
-typedef struct {
+struct xpakStat {
 	u64 alarm_transceiver_temp_high;
 	u64 alarm_transceiver_temp_low;
 	u64 alarm_laser_bias_current_high;
@@ -110,11 +112,11 @@ typedef struct {
 	u64 warn_laser_output_power_low;
 	u64 xpak_regs_stat;
 	u32 xpak_timer_count;
-} xpakStat_t;
+};
 
 
 /* The statistics block of Xena */
-typedef struct stat_block {
+struct stat_block {
 /* Tx MAC statistics counters. */
 	__le32 tmac_data_octets;
 	__le32 tmac_frms;
@@ -290,9 +292,9 @@ typedef struct stat_block {
 	__le32 reserved_14;
 	__le32 link_fault_cnt;
 	u8  buffer[20];
-	swStat_t sw_stat;
-	xpakStat_t xpak_stat;
-} StatInfo_t;
+	struct swStat sw_stat;
+	struct xpakStat xpak_stat;
+};
 
 /*
  * Structures representing different init time configuration
@@ -315,7 +317,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
 };
 
 /* Maintains Per FIFO related information. */
-typedef struct tx_fifo_config {
+struct tx_fifo_config {
 #define	MAX_AVAILABLE_TXDS	8192
 	u32 fifo_len;		/* specifies len of FIFO upto 8192, ie no of TxDLs */
 /* Priority definition */
@@ -332,11 +334,11 @@ typedef struct tx_fifo_config {
 	u8 f_no_snoop;
 #define NO_SNOOP_TXD                0x01
 #define NO_SNOOP_TXD_BUFFER          0x02
-} tx_fifo_config_t;
+};
 
 
 /* Maintains per Ring related information */
-typedef struct rx_ring_config {
+struct rx_ring_config {
 	u32 num_rxd;		/*No of RxDs per Rx Ring */
 #define RX_RING_PRI_0               0	/* highest */
 #define RX_RING_PRI_1               1
@@ -357,7 +359,7 @@ typedef struct rx_ring_config {
 	u8 f_no_snoop;
 #define NO_SNOOP_RXD                0x01
 #define NO_SNOOP_RXD_BUFFER         0x02
-} rx_ring_config_t;
+};
 
 /* This structure provides contains values of the tunable parameters
  * of the H/W
@@ -367,7 +369,7 @@ struct config_param {
 	u32 tx_fifo_num;	/*Number of Tx FIFOs */
 
 	u8 fifo_mapping[MAX_TX_FIFOS];
-	tx_fifo_config_t tx_cfg[MAX_TX_FIFOS];	/*Per-Tx FIFO config */
+	struct tx_fifo_config tx_cfg[MAX_TX_FIFOS];	/*Per-Tx FIFO config */
 	u32 max_txds;		/*Max no. of Tx buffer descriptor per TxDL */
 	u64 tx_intr_type;
 	/* Specifies if Tx Intr is UTILZ or PER_LIST type. */
@@ -376,7 +378,7 @@ struct config_param {
 	u32 rx_ring_num;	/*Number of receive rings */
 #define MAX_RX_BLOCKS_PER_RING  150
 
-	rx_ring_config_t rx_cfg[MAX_RX_RINGS];	/*Per-Rx Ring config */
+	struct rx_ring_config rx_cfg[MAX_RX_RINGS];	/*Per-Rx Ring config */
 	u8 bimodal;		/*Flag for setting bimodal interrupts*/
 
 #define HEADER_ETHERNET_II_802_3_SIZE 14
@@ -395,14 +397,14 @@ struct config_param {
 };
 
 /* Structure representing MAC Addrs */
-typedef struct mac_addr {
+struct mac_addr {
 	u8 mac_addr[ETH_ALEN];
-} macaddr_t;
+};
 
 /* Structure that represent every FIFO element in the BAR1
  * Address location.
  */
-typedef struct _TxFIFO_element {
+struct TxFIFO_element {
 	u64 TxDL_Pointer;
 
 	u64 List_Control;
@@ -413,10 +415,10 @@ typedef struct _TxFIFO_element {
 #define TX_FIFO_SPECIAL_FUNC           BIT(23)
 #define TX_FIFO_DS_NO_SNOOP            BIT(31)
 #define TX_FIFO_BUFF_NO_SNOOP          BIT(30)
-} TxFIFO_element_t;
+};
 
 /* Tx descriptor structure */
-typedef struct _TxD {
+struct TxD {
 	u64 Control_1;
 /* bit mask */
 #define TXD_LIST_OWN_XENA       BIT(7)
@@ -447,16 +449,16 @@ typedef struct _TxD {
 
 	u64 Buffer_Pointer;
 	u64 Host_Control;	/* reserved for host */
-} TxD_t;
+};
 
 /* Structure to hold the phy and virt addr of every TxDL. */
-typedef struct list_info_hold {
+struct list_info_hold {
 	dma_addr_t list_phy_addr;
 	void *list_virt_addr;
-} list_info_hold_t;
+};
 
 /* Rx descriptor structure for 1 buffer mode */
-typedef struct _RxD_t {
+struct RxD_t {
 	u64 Host_Control;	/* reserved for host */
 	u64 Control_1;
 #define RXD_OWN_XENA            BIT(7)
@@ -481,21 +483,21 @@ typedef struct _RxD_t {
 #define SET_NUM_TAG(val)       vBIT(val,16,32)
 
 
-} RxD_t;
+};
 /* Rx descriptor structure for 1 buffer mode */
-typedef struct _RxD1_t {
-	struct _RxD_t h;
+struct RxD1 {
+	struct RxD_t h;
 
 #define MASK_BUFFER0_SIZE_1       vBIT(0x3FFF,2,14)
 #define SET_BUFFER0_SIZE_1(val)   vBIT(val,2,14)
 #define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
 	(u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
 	u64 Buffer0_ptr;
-} RxD1_t;
+};
 /* Rx descriptor structure for 3 or 2 buffer mode */
 
-typedef struct _RxD3_t {
-	struct _RxD_t h;
+struct RxD3 {
+	struct RxD_t h;
 
 #define MASK_BUFFER0_SIZE_3       vBIT(0xFF,2,14)
 #define MASK_BUFFER1_SIZE_3       vBIT(0xFFFF,16,16)
@@ -515,15 +517,15 @@ typedef struct _RxD3_t {
 	u64 Buffer0_ptr;
 	u64 Buffer1_ptr;
 	u64 Buffer2_ptr;
-} RxD3_t;
+};
 
 
 /* Structure that represents the Rx descriptor block which contains
  * 128 Rx descriptors.
  */
-typedef struct _RxD_block {
+struct RxD_block {
 #define MAX_RXDS_PER_BLOCK_1            127
-	RxD1_t rxd[MAX_RXDS_PER_BLOCK_1];
+	struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
 
 	u64 reserved_0;
 #define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
@@ -533,22 +535,22 @@ typedef struct _RxD_block {
 	u64 pNext_RxD_Blk_physical;	/* Buff0_ptr.In a 32 bit arch
 					 * the upper 32 bits should
 					 * be 0 */
-} RxD_block_t;
+};
 
 #define SIZE_OF_BLOCK	4096
 
-#define RXD_MODE_1	0
-#define RXD_MODE_3A	1
-#define RXD_MODE_3B	2
+#define RXD_MODE_1	0 /* One Buffer mode */
+#define RXD_MODE_3A	1 /* Three Buffer mode */
+#define RXD_MODE_3B	2 /* Two Buffer mode */
 
 /* Structure to hold virtual addresses of Buf0 and Buf1 in
  * 2buf mode. */
-typedef struct bufAdd {
+struct buffAdd {
 	void *ba_0_org;
 	void *ba_1_org;
 	void *ba_0;
 	void *ba_1;
-} buffAdd_t;
+};
 
 /* Structure which stores all the MAC control parameters */
 
@@ -556,43 +558,46 @@ typedef struct bufAdd {
  * from which the Rx Interrupt processor can start picking
  * up the RxDs for processing.
  */
-typedef struct _rx_curr_get_info_t {
+struct rx_curr_get_info {
 	u32 block_index;
 	u32 offset;
 	u32 ring_len;
-} rx_curr_get_info_t;
+};
 
-typedef rx_curr_get_info_t rx_curr_put_info_t;
+struct rx_curr_put_info {
+	u32 block_index;
+	u32 offset;
+	u32 ring_len;
+};
 
 /* This structure stores the offset of the TxDl in the FIFO
  * from which the Tx Interrupt processor can start picking
  * up the TxDLs for send complete interrupt processing.
  */
-typedef struct {
+struct tx_curr_get_info {
 	u32 offset;
 	u32 fifo_len;
-} tx_curr_get_info_t;
-
-typedef tx_curr_get_info_t tx_curr_put_info_t;
+};
 
+struct tx_curr_put_info {
+	u32 offset;
+	u32 fifo_len;
+};
 
-typedef struct rxd_info {
+struct rxd_info {
 	void *virt_addr;
 	dma_addr_t dma_addr;
-}rxd_info_t;
+};
 
 /* Structure that holds the Phy and virt addresses of the Blocks */
-typedef struct rx_block_info {
+struct rx_block_info {
 	void *block_virt_addr;
 	dma_addr_t block_dma_addr;
-	rxd_info_t *rxds;
-} rx_block_info_t;
-
-/* pre declaration of the nic structure */
-typedef struct s2io_nic nic_t;
+	struct rxd_info *rxds;
+};
 
 /* Ring specific structure */
-typedef struct ring_info {
+struct ring_info {
 	/* The ring number */
 	int ring_no;
 
@@ -600,7 +605,7 @@ typedef struct ring_info {
 	 *  Place holders for the virtual and physical addresses of
 	 *  all the Rx Blocks
 	 */
-	rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
+	struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
 	int block_count;
 	int pkt_cnt;
 
@@ -608,26 +613,24 @@ typedef struct ring_info {
 	 * Put pointer info which indictes which RxD has to be replenished
 	 * with a new buffer.
 	 */
-	rx_curr_put_info_t rx_curr_put_info;
+	struct rx_curr_put_info rx_curr_put_info;
 
 	/*
 	 * Get pointer info which indictes which is the last RxD that was
 	 * processed by the driver.
 	 */
-	rx_curr_get_info_t rx_curr_get_info;
+	struct rx_curr_get_info rx_curr_get_info;
 
-#ifndef CONFIG_S2IO_NAPI
 	/* Index to the absolute position of the put pointer of Rx ring */
 	int put_pos;
-#endif
 
 	/* Buffer Address store. */
-	buffAdd_t **ba;
-	nic_t *nic;
-} ring_info_t;
+	struct buffAdd **ba;
+	struct s2io_nic *nic;
+};
 
 /* Fifo specific structure */
-typedef struct fifo_info {
+struct fifo_info {
 	/* FIFO number */
 	int fifo_no;
 
@@ -635,40 +638,40 @@ typedef struct fifo_info {
 	int max_txds;
 
 	/* Place holder of all the TX List's Phy and Virt addresses. */
-	list_info_hold_t *list_info;
+	struct list_info_hold *list_info;
 
 	/*
 	 * Current offset within the tx FIFO where driver would write
 	 * new Tx frame
 	 */
-	tx_curr_put_info_t tx_curr_put_info;
+	struct tx_curr_put_info tx_curr_put_info;
 
 	/*
 	 * Current offset within tx FIFO from where the driver would start freeing
 	 * the buffers
 	 */
-	tx_curr_get_info_t tx_curr_get_info;
+	struct tx_curr_get_info tx_curr_get_info;
 
-	nic_t *nic;
-}fifo_info_t;
+	struct s2io_nic *nic;
+};
 
 /* Information related to the Tx and Rx FIFOs and Rings of Xena
  * is maintained in this structure.
  */
-typedef struct mac_info {
+struct mac_info {
 /* tx side stuff */
 	/* logical pointer of start of each Tx FIFO */
-	TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
+	struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
 
 	/* Fifo specific structure */
-	fifo_info_t fifos[MAX_TX_FIFOS];
+	struct fifo_info fifos[MAX_TX_FIFOS];
 
 	/* Save virtual address of TxD page with zero DMA addr(if any) */
 	void *zerodma_virt_addr;
 
 /* rx side stuff */
 	/* Ring specific structure */
-	ring_info_t rings[MAX_RX_RINGS];
+	struct ring_info rings[MAX_RX_RINGS];
 
 	u16 rmac_pause_time;
 	u16 mc_pause_threshold_q0q3;
@@ -677,14 +680,14 @@ typedef struct mac_info {
 	void *stats_mem;	/* orignal pointer to allocated mem */
 	dma_addr_t stats_mem_phy;	/* Physical address of the stat block */
 	u32 stats_mem_sz;
-	StatInfo_t *stats_info;	/* Logical address of the stat block */
-} mac_info_t;
+	struct stat_block *stats_info;	/* Logical address of the stat block */
+};
 
 /* structure representing the user defined MAC addresses */
-typedef struct {
+struct usr_addr {
 	char addr[ETH_ALEN];
 	int usage_cnt;
-} usr_addr_t;
+};
 
 /* Default Tunable parameters of the NIC. */
 #define DEFAULT_FIFO_0_LEN 4096
@@ -717,7 +720,7 @@ struct msix_info_st {
 };
 
 /* Data structure to represent a LRO session */
-typedef struct lro {
+struct lro {
 	struct sk_buff	*parent;
 	struct sk_buff  *last_frag;
 	u8		*l2h;
@@ -733,20 +736,18 @@ typedef struct lro {
 	u32		cur_tsval;
 	u32		cur_tsecr;
 	u8		saw_ts;
-}lro_t;
+};
 
 /* Structure representing one instance of the NIC */
 struct s2io_nic {
 	int rxd_mode;
-#ifdef CONFIG_S2IO_NAPI
 	/*
 	 * Count of packets to be processed in a given iteration, it will be indicated
 	 * by the quota field of the device structure when NAPI is enabled.
 	 */
 	int pkts_to_process;
-#endif
 	struct net_device *dev;
-	mac_info_t mac_control;
+	struct mac_info mac_control;
 	struct config_param config;
 	struct pci_dev *pdev;
 	void __iomem *bar0;
@@ -754,8 +755,8 @@ struct s2io_nic {
 #define MAX_MAC_SUPPORTED   16
 #define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
 
-	macaddr_t def_mac_addr[MAX_MAC_SUPPORTED];
-	macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
+	struct mac_addr def_mac_addr[MAX_MAC_SUPPORTED];
+	struct mac_addr pre_mac_addr[MAX_MAC_SUPPORTED];
 
 	struct net_device_stats stats;
 	int high_dma_flag;
@@ -775,9 +776,7 @@ struct s2io_nic {
 	atomic_t rx_bufs_left[MAX_RX_RINGS];
 
 	spinlock_t tx_lock;
-#ifndef CONFIG_S2IO_NAPI
 	spinlock_t put_lock;
-#endif
 
 #define PROMISC     1
 #define ALL_MULTI   2
@@ -785,7 +784,7 @@ struct s2io_nic {
 #define MAX_ADDRS_SUPPORTED 64
 	u16 usr_addr_count;
 	u16 mc_addr_count;
-	usr_addr_t usr_addrs[MAX_ADDRS_SUPPORTED];
+	struct usr_addr usr_addrs[MAX_ADDRS_SUPPORTED];
 
 	u16 m_cast_flg;
 	u16 all_multi_pos;
@@ -841,7 +840,7 @@ struct s2io_nic {
 	u8 device_type;
 
 #define MAX_LRO_SESSIONS	32
-	lro_t lro0_n[MAX_LRO_SESSIONS];
+	struct lro lro0_n[MAX_LRO_SESSIONS];
 	unsigned long	clubbed_frms_cnt;
 	unsigned long	sending_both;
 	u8		lro;
@@ -855,8 +854,9 @@ struct s2io_nic {
 	spinlock_t	rx_lock;
 	atomic_t	isr_cnt;
 	u64 *ufo_in_band_v;
-#define VPD_PRODUCT_NAME_LEN 50
-	u8  product_name[VPD_PRODUCT_NAME_LEN];
+#define VPD_STRING_LEN 80
+	u8  product_name[VPD_STRING_LEN];
+	u8  serial_num[VPD_STRING_LEN];
 };
 
 #define RESET_ERROR 1;
@@ -975,43 +975,50 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
 static int init_shared_mem(struct s2io_nic *sp);
 static void free_shared_mem(struct s2io_nic *sp);
 static int init_nic(struct s2io_nic *nic);
-static void rx_intr_handler(ring_info_t *ring_data);
-static void tx_intr_handler(fifo_info_t *fifo_data);
+static void rx_intr_handler(struct ring_info *ring_data);
+static void tx_intr_handler(struct fifo_info *fifo_data);
 static void alarm_intr_handler(struct s2io_nic *sp);
 
 static int s2io_starter(void);
+static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_tasklet(unsigned long dev_addr);
 static void s2io_set_multicast(struct net_device *dev);
-static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
-static void s2io_link(nic_t * sp, int link);
-#if defined(CONFIG_S2IO_NAPI)
+static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
+static void s2io_link(struct s2io_nic * sp, int link);
+static void s2io_reset(struct s2io_nic * sp);
 static int s2io_poll(struct net_device *dev, int *budget);
-#endif
-static void s2io_init_pci(nic_t * sp);
+static void s2io_init_pci(struct s2io_nic * sp);
 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
 static void s2io_alarm_handle(unsigned long data);
-static int s2io_enable_msi(nic_t *nic);
+static int s2io_enable_msi(struct s2io_nic *nic);
 static irqreturn_t s2io_msi_handle(int irq, void *dev_id);
 static irqreturn_t
 s2io_msix_ring_handle(int irq, void *dev_id);
 static irqreturn_t
 s2io_msix_fifo_handle(int irq, void *dev_id);
 static irqreturn_t s2io_isr(int irq, void *dev_id);
-static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
+static int verify_xena_quiescence(struct s2io_nic *sp);
 static const struct ethtool_ops netdev_ethtool_ops;
 static void s2io_set_link(struct work_struct *work);
-static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic);
-static int s2io_card_up(nic_t *nic);
+static int s2io_set_swapper(struct s2io_nic * sp);
+static void s2io_card_down(struct s2io_nic *nic);
+static int s2io_card_up(struct s2io_nic *nic);
 static int get_xena_rev_id(struct pci_dev *pdev);
-static void restore_xmsi_data(nic_t *nic);
+static int wait_for_cmd_complete(void *addr, u64 busy_bit);
+static int s2io_add_isr(struct s2io_nic * sp);
+static void s2io_rem_isr(struct s2io_nic * sp);
+
+static void restore_xmsi_data(struct s2io_nic *nic);
 
-static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp);
-static void clear_lro_session(lro_t *lro);
+static int
+s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
+		      struct RxD_t *rxdp, struct s2io_nic *sp);
+static void clear_lro_session(struct lro *lro);
 static void queue_rx_frame(struct sk_buff *skb);
-static void update_L3L4_header(nic_t *sp, lro_t *lro);
-static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
+static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
+static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
+			   struct sk_buff *skb, u32 tcp_len);
 
 #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
 #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size

+ 1620 - 0
drivers/net/sc92031.c

@@ -0,0 +1,1620 @@
+/*  Silan SC92031 PCI Fast Ethernet Adapter driver
+ *
+ *  Based on vendor drivers:
+ *  Silan Fast Ethernet Netcard Driver:
+ *    MODULE_AUTHOR ("gaoyonghong");
+ *    MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
+ *    MODULE_LICENSE("GPL");
+ *  8139D Fast Ethernet driver:
+ *    (C) 2002 by gaoyonghong
+ *    MODULE_AUTHOR ("gaoyonghong");
+ *    MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
+ *    MODULE_LICENSE("GPL");
+ *  Both are almost identical and seem to be based on pci-skeleton.c
+ *
+ *  Rewritten for 2.6 by Cesar Eduardo Barros
+ */
+
+/* Note about set_mac_address: I don't know how to change the hardware
+ * matching, so you need to enable IFF_PROMISC when using it.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+
+#include <asm/irq.h>
+
+#define PCI_VENDOR_ID_SILAN		0x1904
+#define PCI_DEVICE_ID_SILAN_SC92031	0x2031
+#define PCI_DEVICE_ID_SILAN_8139D	0x8139
+
+#define SC92031_NAME "sc92031"
+#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
+#define SC92031_VERSION "2.0c"
+
+/* BAR 0 is MMIO, BAR 1 is PIO */
+#ifndef SC92031_USE_BAR
+#define SC92031_USE_BAR 0
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 64;
+module_param(multicast_filter_limit, int, 0);
+MODULE_PARM_DESC(multicast_filter_limit,
+	"Maximum number of filtered multicast addresses");
+
+static int media;
+module_param(media, int, 0);
+MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
+	" 0x01 = 10M half, 0x02 = 10M full,"
+	" 0x04 = 100M half, 0x08 = 100M full)");
+
+/* Size of the in-memory receive ring. */
+#define  RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/
+#define  RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)
+
+/* Number of Tx descriptor registers. */
+#define  NUM_TX_DESC	   4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define  MAX_ETH_FRAME_SIZE	  1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define  TX_BUF_SIZE       MAX_ETH_FRAME_SIZE
+#define  TX_BUF_TOT_LEN    (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define  RX_FIFO_THRESH    7     /* Rx buffer level before first PCI xfer.  */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define  TX_TIMEOUT     (4*HZ)
+
+#define  SILAN_STATS_NUM    2    /* number of ETHTOOL_GSTATS */
+
+/* media options */
+#define  AUTOSELECT    0x00
+#define  M10_HALF      0x01
+#define  M10_FULL      0x02
+#define  M100_HALF     0x04
+#define  M100_FULL     0x08
+
+ /* Symbolic offsets to registers. */
+enum  silan_registers {
+   Config0    = 0x00,         // Config0
+   Config1    = 0x04,         // Config1
+   RxBufWPtr  = 0x08,         // Rx buffer write pointer
+   IntrStatus = 0x0C,         // Interrupt status
+   IntrMask   = 0x10,         // Interrupt mask
+   RxbufAddr  = 0x14,         // Rx buffer start address
+   RxBufRPtr  = 0x18,         // Rx buffer read pointer
+   Txstatusall = 0x1C,        // Transmit status of all descriptors
+   TxStatus0  = 0x20,	      // Transmit status (Four 32bit registers).
+   TxAddr0    = 0x30,         // Tx descriptors (also four 32bit).
+   RxConfig   = 0x40,         // Rx configuration
+   MAC0	      = 0x44,	      // Ethernet hardware address.
+   MAR0	      = 0x4C,	      // Multicast filter.
+   RxStatus0  = 0x54,         // Rx status
+   TxConfig   = 0x5C,         // Tx configuration
+   PhyCtrl    = 0x60,         // physical control
+   FlowCtrlConfig = 0x64,     // flow control
+   Miicmd0    = 0x68,         // Mii command0 register
+   Miicmd1    = 0x6C,         // Mii command1 register
+   Miistatus  = 0x70,         // Mii status register
+   Timercnt   = 0x74,         // Timer counter register
+   TimerIntr  = 0x78,         // Timer interrupt register
+   PMConfig   = 0x7C,         // Power Manager configuration
+   CRC0       = 0x80,         // Power Manager CRC (two 32bit registers)
+   Wakeup0    = 0x88,         // Power Manager wakeup (eight 64bit registers)
+   LSBCRC0    = 0xC8,         // Power Manager LSBCRC (two 32bit registers)
+   TestD0     = 0xD0,
+   TestD4     = 0xD4,
+   TestD8     = 0xD8,
+};
+
+#define MII_BMCR            0        // Basic mode control register
+#define MII_BMSR            1        // Basic mode status register
+#define MII_JAB             16
+#define MII_OutputStatus    24
+
+#define BMCR_FULLDPLX       0x0100    // Full duplex
+#define BMCR_ANRESTART      0x0200    // Auto negotiation restart
+#define BMCR_ANENABLE       0x1000    // Enable auto negotiation
+#define BMCR_SPEED100       0x2000    // Select 100Mbps
+#define BMSR_LSTATUS        0x0004    // Link status
+#define PHY_16_JAB_ENB      0x1000
+#define PHY_16_PORT_ENB     0x1
+
+enum IntrStatusBits {
+   LinkFail       = 0x80000000,
+   LinkOK         = 0x40000000,
+   TimeOut        = 0x20000000,
+   RxOverflow     = 0x0040,
+   RxOK           = 0x0020,
+   TxOK           = 0x0001,
+   IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
+};
+
+enum TxStatusBits {
+   TxCarrierLost = 0x20000000,
+   TxAborted     = 0x10000000,
+   TxOutOfWindow = 0x08000000,
+   TxNccShift    = 22,
+   EarlyTxThresShift = 16,
+   TxStatOK      = 0x8000,
+   TxUnderrun    = 0x4000,
+   TxOwn         = 0x2000,
+};
+
+enum RxStatusBits {
+   RxStatesOK   = 0x80000,
+   RxBadAlign   = 0x40000,
+   RxHugeFrame  = 0x20000,
+   RxSmallFrame = 0x10000,
+   RxCRCOK      = 0x8000,
+   RxCrlFrame   = 0x4000,
+   Rx_Broadcast = 0x2000,
+   Rx_Multicast = 0x1000,
+   RxAddrMatch  = 0x0800,
+   MiiErr       = 0x0400,
+};
+
+enum RxConfigBits {
+   RxFullDx    = 0x80000000,
+   RxEnb       = 0x40000000,
+   RxSmall     = 0x20000000,
+   RxHuge      = 0x10000000,
+   RxErr       = 0x08000000,
+   RxAllphys   = 0x04000000,
+   RxMulticast = 0x02000000,
+   RxBroadcast = 0x01000000,
+   RxLoopBack  = (1 << 23) | (1 << 22),
+   LowThresholdShift  = 12,
+   HighThresholdShift = 2,
+};
+
+enum TxConfigBits {
+   TxFullDx       = 0x80000000,
+   TxEnb          = 0x40000000,
+   TxEnbPad       = 0x20000000,
+   TxEnbHuge      = 0x10000000,
+   TxEnbFCS       = 0x08000000,
+   TxNoBackOff    = 0x04000000,
+   TxEnbPrem      = 0x02000000,
+   TxCareLostCrs  = 0x1000000,
+   TxExdCollNum   = 0xf00000,
+   TxDataRate     = 0x80000,
+};
+
+enum PhyCtrlconfigbits {
+   PhyCtrlAne         = 0x80000000,
+   PhyCtrlSpd100      = 0x40000000,
+   PhyCtrlSpd10       = 0x20000000,
+   PhyCtrlPhyBaseAddr = 0x1f000000,
+   PhyCtrlDux         = 0x800000,
+   PhyCtrlReset       = 0x400000,
+};
+
+enum FlowCtrlConfigBits {
+   FlowCtrlFullDX = 0x80000000,
+   FlowCtrlEnb    = 0x40000000,
+};
+
+enum Config0Bits {
+   Cfg0_Reset  = 0x80000000,
+   Cfg0_Anaoff = 0x40000000,
+   Cfg0_LDPS   = 0x20000000,
+};
+
+enum Config1Bits {
+   Cfg1_EarlyRx = 1 << 31,
+   Cfg1_EarlyTx = 1 << 30,
+
+   //rx buffer size
+   Cfg1_Rcv8K   = 0x0,
+   Cfg1_Rcv16K  = 0x1,
+   Cfg1_Rcv32K  = 0x3,
+   Cfg1_Rcv64K  = 0x7,
+   Cfg1_Rcv128K = 0xf,
+};
+
+enum MiiCmd0Bits {
+   Mii_Divider = 0x20000000,
+   Mii_WRITE   = 0x400000,
+   Mii_READ    = 0x200000,
+   Mii_SCAN    = 0x100000,
+   Mii_Tamod   = 0x80000,
+   Mii_Drvmod  = 0x40000,
+   Mii_mdc     = 0x20000,
+   Mii_mdoen   = 0x10000,
+   Mii_mdo     = 0x8000,
+   Mii_mdi     = 0x4000,
+};
+
+enum MiiStatusBits {
+    Mii_StatusBusy = 0x80000000,
+};
+
+enum PMConfigBits {
+   PM_Enable  = 1 << 31,
+   PM_LongWF  = 1 << 30,
+   PM_Magic   = 1 << 29,
+   PM_LANWake = 1 << 28,
+   PM_LWPTN   = (1 << 27 | 1<< 26),
+   PM_LinkUp  = 1 << 25,
+   PM_WakeUp  = 1 << 24,
+};
+
+/* Locking rules:
+ * priv->lock protects most of the fields of priv and most of the
+ * hardware registers. It does not have to protect against softirqs
+ * between sc92031_disable_interrupts and sc92031_enable_interrupts;
+ * it also does not need to be used in ->open and ->stop while the
+ * device interrupts are off.
+ * Not having to protect against softirqs is very useful due to heavy
+ * use of mdelay() at _sc92031_reset.
+ * Functions prefixed with _sc92031_ must be called with the lock held;
+ * functions prefixed with sc92031_ must be called without the lock held.
+ * Use mmiowb() before unlocking if the hardware was written to.
+ */
+
+/* Locking rules for the interrupt:
+ * - the interrupt and the tasklet never run at the same time
+ * - neither run between sc92031_disable_interrupts and
+ *   sc92031_enable_interrupt
+ */
+
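
The naming convention above dictates the shape of every public entry point; a hedged sketch of such a caller (sc92031_restart is hypothetical, not one of the driver's functions):

/* Hypothetical example of the convention: the sc92031_-prefixed caller
 * takes priv->lock, the _sc92031_-prefixed worker assumes it is held,
 * and mmiowb() precedes the unlock because registers were written. */
static void sc92031_restart(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	_sc92031_reset(dev);
	mmiowb();
	spin_unlock_bh(&priv->lock);
}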
+struct sc92031_priv {
+	spinlock_t		lock;
+	/* iomap.h cookie */
+	void __iomem		*port_base;
+	/* pci device structure */
+	struct pci_dev		*pdev;
+	/* tasklet */
+	struct tasklet_struct	tasklet;
+
+	/* CPU address of rx ring */
+	void			*rx_ring;
+	/* PCI address of rx ring */
+	dma_addr_t		rx_ring_dma_addr;
+	/* PCI address of rx ring read pointer */
+	dma_addr_t		rx_ring_tail;
+
+	/* tx ring write index */
+	unsigned		tx_head;
+	/* tx ring read index */
+	unsigned		tx_tail;
+	/* CPU address of tx bounce buffer */
+	void			*tx_bufs;
+	/* PCI address of tx bounce buffer */
+	dma_addr_t		tx_bufs_dma_addr;
+
+	/* copies of some hardware registers */
+	u32			intr_status;
+	atomic_t		intr_mask;
+	u32			rx_config;
+	u32			tx_config;
+	u32			pm_config;
+
+	/* copy of some flags from dev->flags */
+	unsigned int		mc_flags;
+
+	/* for ETHTOOL_GSTATS */
+	u64			tx_timeouts;
+	u64			rx_loss;
+
+	/* for dev->get_stats */
+	long			rx_value;
+	struct net_device_stats	stats;
+};
+
+/* I don't know which registers can be safely read; however, I can guess
+ * MAC0 is one of them. */
+static inline void _sc92031_dummy_read(void __iomem *port_base)
+{
+	ioread32(port_base + MAC0);
+}
+
+static u32 _sc92031_mii_wait(void __iomem *port_base)
+{
+	u32 mii_status;
+
+	do {
+		udelay(10);
+		mii_status = ioread32(port_base + Miistatus);
+	} while (mii_status & Mii_StatusBusy);
+
+	return mii_status;
+}
+
+static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
+{
+	iowrite32(Mii_Divider, port_base + Miicmd0);
+
+	_sc92031_mii_wait(port_base);
+
+	iowrite32(cmd1, port_base + Miicmd1);
+	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
+
+	return _sc92031_mii_wait(port_base);
+}
+
+static void _sc92031_mii_scan(void __iomem *port_base)
+{
+	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
+}
+
+static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
+{
+	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
+}
+
+static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
+{
+	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
+}
+
+static void sc92031_disable_interrupts(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	/* tell the tasklet/interrupt not to enable interrupts */
+	atomic_set(&priv->intr_mask, 0);
+	wmb();
+
+	/* stop interrupts */
+	iowrite32(0, port_base + IntrMask);
+	_sc92031_dummy_read(port_base);
+	mmiowb();
+
+	/* wait for any concurrent interrupt/tasklet to finish */
+	synchronize_irq(dev->irq);
+	tasklet_disable(&priv->tasklet);
+}
+
+static void sc92031_enable_interrupts(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	tasklet_enable(&priv->tasklet);
+
+	atomic_set(&priv->intr_mask, IntrBits);
+	wmb();
+
+	iowrite32(IntrBits, port_base + IntrMask);
+	mmiowb();
+}
+
+static void _sc92031_disable_tx_rx(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	priv->rx_config &= ~RxEnb;
+	priv->tx_config &= ~TxEnb;
+	iowrite32(priv->rx_config, port_base + RxConfig);
+	iowrite32(priv->tx_config, port_base + TxConfig);
+}
+
+static void _sc92031_enable_tx_rx(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	priv->rx_config |= RxEnb;
+	priv->tx_config |= TxEnb;
+	iowrite32(priv->rx_config, port_base + RxConfig);
+	iowrite32(priv->tx_config, port_base + TxConfig);
+}
+
+static void _sc92031_tx_clear(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	while (priv->tx_head - priv->tx_tail > 0) {
+		priv->tx_tail++;
+		priv->stats.tx_dropped++;
+	}
+	priv->tx_head = priv->tx_tail = 0;
+}
+
+static void _sc92031_set_mar(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 mar0 = 0, mar1 = 0;
+
+	if ((dev->flags & IFF_PROMISC)
+			|| dev->mc_count > multicast_filter_limit
+			|| (dev->flags & IFF_ALLMULTI))
+		mar0 = mar1 = 0xffffffff;
+	else if (dev->flags & IFF_MULTICAST) {
+		struct dev_mc_list *mc_list;
+
+		for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+			u32 crc;
+			unsigned bit = 0;
+
+			crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
+			crc >>= 24;
+
+			if (crc & 0x01)	bit |= 0x02;
+			if (crc & 0x02)	bit |= 0x01;
+			if (crc & 0x10)	bit |= 0x20;
+			if (crc & 0x20)	bit |= 0x10;
+			if (crc & 0x40)	bit |= 0x08;
+			if (crc & 0x80)	bit |= 0x04;
+
+			if (bit > 31)
+				mar0 |= 0x1 << (bit - 32);
+			else
+				mar1 |= 0x1 << bit;
+		}
+	}
+
+	iowrite32(mar0, port_base + MAR0);
+	iowrite32(mar1, port_base + MAR0 + 4);
+}
+
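
The if-chain in _sc92031_set_mar() scatters bits {0,1,4,5,6,7} of the CRC's top byte into filter-bit positions {1,0,5,4,3,2}; CRC bits 2 and 3 go unused. A table-driven restatement of the same mapping, for readers tracing the filter layout (sc92031_hash_bit is illustrative, not part of the driver):

/* Illustrative equivalent of the scatter above: CRC top-byte bit i,
 * where used, sets multicast-filter bit pos[i]. */
static unsigned sc92031_hash_bit(u32 crc_top_byte)
{
	static const int pos[8] = { 1, 0, -1, -1, 5, 4, 3, 2 };
	unsigned bit = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (pos[i] >= 0 && (crc_top_byte & (1u << i)))
			bit |= 1u << pos[i];
	return bit;
}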
+static void _sc92031_set_rx_config(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	unsigned int old_mc_flags;
+	u32 rx_config_bits = 0;
+
+	old_mc_flags = priv->mc_flags;
+
+	if (dev->flags & IFF_PROMISC)
+		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
+				| RxMulticast | RxAllphys;
+
+	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+		rx_config_bits |= RxMulticast;
+
+	if (dev->flags & IFF_BROADCAST)
+		rx_config_bits |= RxBroadcast;
+
+	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
+			| RxMulticast | RxAllphys);
+	priv->rx_config |= rx_config_bits;
+
+	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
+			| IFF_MULTICAST | IFF_BROADCAST);
+
+	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
+		iowrite32(priv->rx_config, port_base + RxConfig);
+}
+
+static bool _sc92031_check_media(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u16 bmsr;
+
+	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
+	rmb();
+	if (bmsr & BMSR_LSTATUS) {
+		bool speed_100, duplex_full;
+		u32 flow_ctrl_config = 0;
+		u16 output_status = _sc92031_mii_read(port_base,
+				MII_OutputStatus);
+		_sc92031_mii_scan(port_base);
+
+		speed_100 = output_status & 0x2;
+		duplex_full = output_status & 0x4;
+
+		/* Initial Tx/Rx configuration */
+		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
+		priv->tx_config = 0x48800000;
+
+		/* NOTE: vendor driver had dead code here to enable tx padding */
+
+		if (!speed_100)
+			priv->tx_config |= 0x80000;
+
+		// configure rx mode
+		_sc92031_set_rx_config(dev);
+
+		if (duplex_full) {
+			priv->rx_config |= RxFullDx;
+			priv->tx_config |= TxFullDx;
+			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
+		} else {
+			priv->rx_config &= ~RxFullDx;
+			priv->tx_config &= ~TxFullDx;
+		}
+
+		_sc92031_set_mar(dev);
+		_sc92031_set_rx_config(dev);
+		_sc92031_enable_tx_rx(dev);
+		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);
+
+		netif_carrier_on(dev);
+
+		if (printk_ratelimit())
+			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
+				dev->name,
+				speed_100 ? "100" : "10",
+				duplex_full ? "full" : "half");
+		return true;
+	} else {
+		_sc92031_mii_scan(port_base);
+
+		netif_carrier_off(dev);
+
+		_sc92031_disable_tx_rx(dev);
+
+		if (printk_ratelimit())
+			printk(KERN_INFO "%s: link down\n", dev->name);
+		return false;
+	}
+}
+
+static void _sc92031_phy_reset(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 phy_ctrl;
+
+	phy_ctrl = ioread32(port_base + PhyCtrl);
+	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
+	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
+
+	switch (media) {
+	default:
+	case AUTOSELECT:
+		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
+		break;
+	case M10_HALF:
+		phy_ctrl |= PhyCtrlSpd10;
+		break;
+	case M10_FULL:
+		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
+		break;
+	case M100_HALF:
+		phy_ctrl |= PhyCtrlSpd100;
+		break;
+	case M100_FULL:
+		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
+		break;
+	}
+
+	iowrite32(phy_ctrl, port_base + PhyCtrl);
+	mdelay(10);
+
+	phy_ctrl &= ~PhyCtrlReset;
+	iowrite32(phy_ctrl, port_base + PhyCtrl);
+	mdelay(1);
+
+	_sc92031_mii_write(port_base, MII_JAB,
+			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
+	_sc92031_mii_scan(port_base);
+
+	netif_carrier_off(dev);
+	netif_stop_queue(dev);
+}
+
+static void _sc92031_reset(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	/* disable PM */
+	iowrite32(0, port_base + PMConfig);
+
+	/* soft reset the chip */
+	iowrite32(Cfg0_Reset, port_base + Config0);
+	mdelay(200);
+
+	iowrite32(0, port_base + Config0);
+	mdelay(10);
+
+	/* disable interrupts */
+	iowrite32(0, port_base + IntrMask);
+
+	/* clear multicast address */
+	iowrite32(0, port_base + MAR0);
+	iowrite32(0, port_base + MAR0 + 4);
+
+	/* init rx ring */
+	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
+	priv->rx_ring_tail = priv->rx_ring_dma_addr;
+
+	/* init tx ring */
+	_sc92031_tx_clear(dev);
+
+	/* clear old register values */
+	priv->intr_status = 0;
+	atomic_set(&priv->intr_mask, 0);
+	priv->rx_config = 0;
+	priv->tx_config = 0;
+	priv->mc_flags = 0;
+
+	/* configure rx buffer size */
+	/* NOTE: vendor driver had dead code here to enable early tx/rx */
+	iowrite32(Cfg1_Rcv64K, port_base + Config1);
+
+	_sc92031_phy_reset(dev);
+	_sc92031_check_media(dev);
+
+	/* calculate rx fifo overflow */
+	priv->rx_value = 0;
+
+	/* enable PM */
+	iowrite32(priv->pm_config, port_base + PMConfig);
+
+	/* clear intr register */
+	ioread32(port_base + IntrStatus);
+}
+
+static void _sc92031_tx_tasklet(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	unsigned old_tx_tail;
+	unsigned entry;
+	u32 tx_status;
+
+	old_tx_tail = priv->tx_tail;
+	while (priv->tx_head - priv->tx_tail > 0) {
+		entry = priv->tx_tail % NUM_TX_DESC;
+		tx_status = ioread32(port_base + TxStatus0 + entry * 4);
+
+		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
+			break;
+
+		priv->tx_tail++;
+
+		if (tx_status & TxStatOK) {
+			priv->stats.tx_bytes += tx_status & 0x1fff;
+			priv->stats.tx_packets++;
+			/* Note: TxCarrierLost is always asserted at 100 Mbps. */
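+			/* bits 22-25 of TxStatus appear to hold the
+			 * collision count for this packet */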
+			priv->stats.collisions += (tx_status >> 22) & 0xf;
+		}
+
+		if (tx_status & (TxOutOfWindow | TxAborted)) {
+			priv->stats.tx_errors++;
+
+			if (tx_status & TxAborted)
+				priv->stats.tx_aborted_errors++;
+
+			if (tx_status & TxCarrierLost)
+				priv->stats.tx_carrier_errors++;
+
+			if (tx_status & TxOutOfWindow)
+				priv->stats.tx_window_errors++;
+		}
+
+		if (tx_status & TxUnderrun)
+			priv->stats.tx_fifo_errors++;
+	}
+
+	if (priv->tx_tail != old_tx_tail)
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+}
+
+static void _sc92031_rx_tasklet_error(u32 rx_status,
+		struct sc92031_priv *priv, unsigned rx_size)
+{
+	if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_length_errors++;
+	}
+
+	if (!(rx_status & RxStatesOK)) {
+		priv->stats.rx_errors++;
+
+		if (rx_status & (RxHugeFrame | RxSmallFrame))
+			priv->stats.rx_length_errors++;
+
+		if (rx_status & RxBadAlign)
+			priv->stats.rx_frame_errors++;
+
+		if (!(rx_status & RxCRCOK))
+			priv->stats.rx_crc_errors++;
+	} else
+		priv->rx_loss++;
+}
+
+static void _sc92031_rx_tasklet(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	dma_addr_t rx_ring_head;
+	unsigned rx_len;
+	unsigned rx_ring_offset;
+	void *rx_ring = priv->rx_ring;
+
+	rx_ring_head = ioread32(port_base + RxBufWPtr);
+	rmb();
+
+	/* rx_ring_head is only 17 bits in the RxBufWPtr register;
+	 * convert it back to a full 32-bit bus address.
+	 */
+	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
+	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
+	if (rx_ring_head < priv->rx_ring_dma_addr)
+		rx_ring_head += RX_BUF_LEN;
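+	/* e.g. a 17-bit write pointer of 0x00123 over a ring at bus address
+	 * 0x12340000 reconstructs to 0x12340123 (illustrative values only) */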
+
+	if (rx_ring_head >= priv->rx_ring_tail)
+		rx_len = rx_ring_head - priv->rx_ring_tail;
+	else
+		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);
+
+	if (!rx_len)
+		return;
+
+	if (unlikely(rx_len > RX_BUF_LEN)) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "%s: rx packet length > rx buffer size\n",
+					dev->name);
+		return;
+	}
+
+	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;
+
+	while (rx_len) {
+		u32 rx_status;
+		unsigned rx_size, rx_size_align, pkt_size;
+		struct sk_buff *skb;
+
+		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
+		rmb();
+
+		rx_size = rx_status >> 20;
+		rx_size_align = (rx_size + 3) & ~3;	// round up to a 4-byte boundary
+		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.
+
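+		/* skip the 4-byte status word that precedes the packet data */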
+		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
+
+		if (unlikely(rx_status == 0
+				|| rx_size > (MAX_ETH_FRAME_SIZE + 4)
+				|| rx_size < 16
+				|| !(rx_status & RxStatesOK))) {
+			_sc92031_rx_tasklet_error(rx_status, priv, rx_size);
+			break;
+		}
+
+		if (unlikely(rx_size_align + 4 > rx_len)) {
+			if (printk_ratelimit())
+				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
+			break;
+		}
+
+		rx_len -= rx_size_align + 4;
+
+		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+		if (unlikely(!skb)) {
+			if (printk_ratelimit())
+				printk(KERN_ERR "%s: Couldn't allocate an sk_buff for a packet of size %u\n",
+						dev->name, pkt_size);
+			goto next;
+		}
+
+		skb_reserve(skb, NET_IP_ALIGN);
+
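+		/* the packet may wrap past the end of the rx ring;
+		 * if so, copy it into the skb in two chunks */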
+		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
+			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
+				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
+			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
+				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
+		} else {
+			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
+		}
+
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+		dev->last_rx = jiffies;
+		netif_rx(skb);
+
+		priv->stats.rx_bytes += pkt_size;
+		priv->stats.rx_packets++;
+
+		if (rx_status & Rx_Multicast)
+			priv->stats.multicast++;
+
+	next:
+		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
+	}
+	mb();
+
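+	/* hand the consumed space back to the hardware by advancing
+	 * the read pointer */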
+	priv->rx_ring_tail = rx_ring_head;
+	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
+}
+
+static void _sc92031_link_tasklet(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	if (_sc92031_check_media(dev))
+		netif_wake_queue(dev);
+	else {
+		netif_stop_queue(dev);
+		priv->stats.tx_carrier_errors++;
+	}
+}
+
+static void sc92031_tasklet(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 intr_status, intr_mask;
+
+	intr_status = priv->intr_status;
+
+	spin_lock(&priv->lock);
+
+	if (unlikely(!netif_running(dev)))
+		goto out;
+
+	if (intr_status & TxOK)
+		_sc92031_tx_tasklet(dev);
+
+	if (intr_status & RxOK)
+		_sc92031_rx_tasklet(dev);
+
+	if (intr_status & RxOverflow)
+		priv->stats.rx_errors++;
+
+	if (intr_status & TimeOut) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_length_errors++;
+	}
+
+	if (intr_status & (LinkFail | LinkOK))
+		_sc92031_link_tasklet(dev);
+
+out:
+	intr_mask = atomic_read(&priv->intr_mask);
+	rmb();
+
+	iowrite32(intr_mask, port_base + IntrMask);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+}
+
+static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 intr_status, intr_mask;
+
+	/* mask interrupts before clearing IntrStatus */
+	iowrite32(0, port_base + IntrMask);
+	_sc92031_dummy_read(port_base);
+
+	intr_status = ioread32(port_base + IntrStatus);
+	if (unlikely(intr_status == 0xffffffff))
+		return IRQ_NONE;	// hardware has gone missing
+
+	intr_status &= IntrBits;
+	if (!intr_status)
+		goto out_none;
+
+	priv->intr_status = intr_status;
+	tasklet_schedule(&priv->tasklet);
+
+	return IRQ_HANDLED;
+
+out_none:
+	intr_mask = atomic_read(&priv->intr_mask);
+	rmb();
+
+	iowrite32(intr_mask, port_base + IntrMask);
+	mmiowb();
+
+	return IRQ_NONE;
+}
+
+static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	// FIXME: I do not understand what this is trying to do.
+	if (netif_running(dev)) {
+		int temp;
+
+		spin_lock_bh(&priv->lock);
+
+		/* Update the error count. */
+		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
+
+		if (temp == 0xffff) {
+			priv->rx_value += temp;
+			priv->stats.rx_fifo_errors = priv->rx_value;
+		} else {
+			priv->stats.rx_fifo_errors = temp + priv->rx_value;
+		}
+
+		spin_unlock_bh(&priv->lock);
+	}
+
+	return &priv->stats;
+}
+
+static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int err = 0;
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+
+	unsigned len;
+	unsigned entry;
+	u32 tx_status;
+
+	if (unlikely(skb->len > TX_BUF_SIZE)) {
+		err = -EMSGSIZE;
+		priv->stats.tx_dropped++;
+		goto out;
+	}
+
+	spin_lock_bh(&priv->lock);
+
+	if (unlikely(!netif_carrier_ok(dev))) {
+		err = -ENOLINK;
+		priv->stats.tx_dropped++;
+		goto out_unlock;
+	}
+
+	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
+
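+	/* tx_head is a free-running counter; modulo NUM_TX_DESC
+	 * maps it to a descriptor slot */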
+	entry = priv->tx_head++ % NUM_TX_DESC;
+
+	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
+
+	len = skb->len;
+	if (unlikely(len < ETH_ZLEN)) {
+		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
+				0, ETH_ZLEN - len);
+		len = ETH_ZLEN;
+	}
+
+	wmb();
+
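+	/* the extra high bits written to TxStatus presumably select a
+	 * larger early-TX threshold for longer frames (meaning inferred
+	 * from the vendor driver's magic numbers) */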
+	if (len < 100)
+		tx_status = len;
+	else if (len < 300)
+		tx_status = 0x30000 | len;
+	else
+		tx_status = 0x50000 | len;
+
+	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
+			port_base + TxAddr0 + entry * 4);
+	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
+	mmiowb();
+
+	dev->trans_start = jiffies;
+
+	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
+		netif_stop_queue(dev);
+
+out_unlock:
+	spin_unlock_bh(&priv->lock);
+
+out:
+	dev_kfree_skb(skb);
+
+	return err;
+}
+
+static int sc92031_open(struct net_device *dev)
+{
+	int err;
+	struct sc92031_priv *priv = netdev_priv(dev);
+	struct pci_dev *pdev = priv->pdev;
+
+	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
+			&priv->rx_ring_dma_addr);
+	if (unlikely(!priv->rx_ring)) {
+		err = -ENOMEM;
+		goto out_alloc_rx_ring;
+	}
+
+	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
+			&priv->tx_bufs_dma_addr);
+	if (unlikely(!priv->tx_bufs)) {
+		err = -ENOMEM;
+		goto out_alloc_tx_bufs;
+	}
+	priv->tx_head = priv->tx_tail = 0;
+
+	err = request_irq(pdev->irq, sc92031_interrupt,
+			SA_SHIRQ, dev->name, dev);
+	if (unlikely(err < 0))
+		goto out_request_irq;
+
+	priv->pm_config = 0;
+
+	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
+	spin_lock(&priv->lock);
+
+	_sc92031_reset(dev);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+	sc92031_enable_interrupts(dev);
+
+	if (netif_carrier_ok(dev))
+		netif_start_queue(dev);
+	else
+		netif_tx_disable(dev);
+
+	return 0;
+
+out_request_irq:
+	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
+			priv->tx_bufs_dma_addr);
+out_alloc_tx_bufs:
+	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
+			priv->rx_ring_dma_addr);
+out_alloc_rx_ring:
+	return err;
+}
+
+static int sc92031_stop(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	struct pci_dev *pdev = priv->pdev;
+
+	netif_tx_disable(dev);
+
+	/* Disable interrupts, stop Tx and Rx. */
+	sc92031_disable_interrupts(dev);
+
+	spin_lock(&priv->lock);
+
+	_sc92031_disable_tx_rx(dev);
+	_sc92031_tx_clear(dev);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+
+	free_irq(pdev->irq, dev);
+	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
+			priv->tx_bufs_dma_addr);
+	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
+			priv->rx_ring_dma_addr);
+
+	return 0;
+}
+
+static void sc92031_set_multicast_list(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	spin_lock_bh(&priv->lock);
+
+	_sc92031_set_mar(dev);
+	_sc92031_set_rx_config(dev);
+	mmiowb();
+
+	spin_unlock_bh(&priv->lock);
+}
+
+static void sc92031_tx_timeout(struct net_device *dev)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	/* Disable interrupts by clearing the interrupt mask.*/
+	sc92031_disable_interrupts(dev);
+
+	spin_lock(&priv->lock);
+
+	priv->tx_timeouts++;
+
+	_sc92031_reset(dev);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+
+	/* enable interrupts */
+	sc92031_enable_interrupts(dev);
+
+	if (netif_carrier_ok(dev))
+		netif_wake_queue(dev);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sc92031_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
+		sc92031_tasklet((unsigned long)dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static int sc92031_ethtool_get_settings(struct net_device *dev,
+		struct ethtool_cmd *cmd)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u8 phy_address;
+	u32 phy_ctrl;
+	u16 output_status;
+
+	spin_lock_bh(&priv->lock);
+
+	phy_address = ioread32(port_base + Miicmd1) >> 27;
+	phy_ctrl = ioread32(port_base + PhyCtrl);
+
+	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
+	_sc92031_mii_scan(port_base);
+	mmiowb();
+
+	spin_unlock_bh(&priv->lock);
+
+	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
+			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
+
+	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+
+	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
+			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
+		cmd->advertising |= ADVERTISED_Autoneg;
+
+	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
+		cmd->advertising |= ADVERTISED_10baseT_Half;
+
+	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
+			== (PhyCtrlSpd10 | PhyCtrlDux))
+		cmd->advertising |= ADVERTISED_10baseT_Full;
+
+	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
+		cmd->advertising |= ADVERTISED_100baseT_Half;
+
+	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
+			== (PhyCtrlSpd100 | PhyCtrlDux))
+		cmd->advertising |= ADVERTISED_100baseT_Full;
+
+	if (phy_ctrl & PhyCtrlAne)
+		cmd->advertising |= ADVERTISED_Autoneg;
+
+	cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
+	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = PORT_MII;
+	cmd->phy_address = phy_address;
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static int sc92031_ethtool_set_settings(struct net_device *dev,
+		struct ethtool_cmd *cmd)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 phy_ctrl;
+	u32 old_phy_ctrl;
+
+	if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100))
+		return -EINVAL;
+	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
+		return -EINVAL;
+	if (!(cmd->port == PORT_MII))
+		return -EINVAL;
+	if (!(cmd->phy_address == 0x1f))
+		return -EINVAL;
+	if (!(cmd->transceiver == XCVR_INTERNAL))
+		return -EINVAL;
+	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (!(cmd->advertising & (ADVERTISED_Autoneg
+				| ADVERTISED_100baseT_Full
+				| ADVERTISED_100baseT_Half
+				| ADVERTISED_10baseT_Full
+				| ADVERTISED_10baseT_Half)))
+			return -EINVAL;
+
+		phy_ctrl = PhyCtrlAne;
+
+		// FIXME: I'm not sure what the original code was trying to do
+		if (cmd->advertising & ADVERTISED_Autoneg)
+			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
+		if (cmd->advertising & ADVERTISED_100baseT_Full)
+			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
+		if (cmd->advertising & ADVERTISED_100baseT_Half)
+			phy_ctrl |= PhyCtrlSpd100;
+		if (cmd->advertising & ADVERTISED_10baseT_Full)
+			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
+		if (cmd->advertising & ADVERTISED_10baseT_Half)
+			phy_ctrl |= PhyCtrlSpd10;
+	} else {
+		// FIXME: Whole branch guessed
+		phy_ctrl = 0;
+
+		if (cmd->speed == SPEED_10)
+			phy_ctrl |= PhyCtrlSpd10;
+		else /* cmd->speed == SPEED_100 */
+			phy_ctrl |= PhyCtrlSpd100;
+
+		if (cmd->duplex == DUPLEX_FULL)
+			phy_ctrl |= PhyCtrlDux;
+	}
+
+	spin_lock_bh(&priv->lock);
+
+	old_phy_ctrl = ioread32(port_base + PhyCtrl);
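+	/* preserve every PhyCtrl bit other than autoneg/duplex/speed */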
+	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
+			| PhyCtrlSpd100 | PhyCtrlSpd10);
+	if (phy_ctrl != old_phy_ctrl)
+		iowrite32(phy_ctrl, port_base + PhyCtrl);
+
+	spin_unlock_bh(&priv->lock);
+
+	return 0;
+}
+
+static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
+		struct ethtool_drvinfo *drvinfo)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	struct pci_dev *pdev = priv->pdev;
+
+	strcpy(drvinfo->driver, SC92031_NAME);
+	strcpy(drvinfo->version, SC92031_VERSION);
+	strcpy(drvinfo->bus_info, pci_name(pdev));
+}
+
+static void sc92031_ethtool_get_wol(struct net_device *dev,
+		struct ethtool_wolinfo *wolinfo)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 pm_config;
+
+	spin_lock_bh(&priv->lock);
+	pm_config = ioread32(port_base + PMConfig);
+	spin_unlock_bh(&priv->lock);
+
+	// FIXME: Guessed
+	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
+			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+	wolinfo->wolopts = 0;
+
+	if (pm_config & PM_LinkUp)
+		wolinfo->wolopts |= WAKE_PHY;
+
+	if (pm_config & PM_Magic)
+		wolinfo->wolopts |= WAKE_MAGIC;
+
+	if (pm_config & PM_WakeUp)
+		// FIXME: Guessed
+		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+}
+
+static int sc92031_ethtool_set_wol(struct net_device *dev,
+		struct ethtool_wolinfo *wolinfo)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u32 pm_config;
+
+	spin_lock_bh(&priv->lock);
+
+	pm_config = ioread32(port_base + PMConfig)
+			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);
+
+	if (wolinfo->wolopts & WAKE_PHY)
+		pm_config |= PM_LinkUp;
+
+	if (wolinfo->wolopts & WAKE_MAGIC)
+		pm_config |= PM_Magic;
+
+	// FIXME: Guessed
+	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+		pm_config |= PM_WakeUp;
+
+	priv->pm_config = pm_config;
+	iowrite32(pm_config, port_base + PMConfig);
+	mmiowb();
+
+	spin_unlock_bh(&priv->lock);
+
+	return 0;
+}
+
+static int sc92031_ethtool_nway_reset(struct net_device *dev)
+{
+	int err = 0;
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem *port_base = priv->port_base;
+	u16 bmcr;
+
+	spin_lock_bh(&priv->lock);
+
+	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
+	if (!(bmcr & BMCR_ANENABLE)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
+
+out:
+	_sc92031_mii_scan(port_base);
+	mmiowb();
+
+	spin_unlock_bh(&priv->lock);
+
+	return err;
+}
+
+static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
+	"tx_timeout",
+	"rx_loss",
+};
+
+static void sc92031_ethtool_get_strings(struct net_device *dev,
+		u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, sc92031_ethtool_stats_strings,
+				SILAN_STATS_NUM * ETH_GSTRING_LEN);
+}
+
+static int sc92031_ethtool_get_stats_count(struct net_device *dev)
+{
+	return SILAN_STATS_NUM;
+}
+
+static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	spin_lock_bh(&priv->lock);
+	data[0] = priv->tx_timeouts;
+	data[1] = priv->rx_loss;
+	spin_unlock_bh(&priv->lock);
+}
+
+static struct ethtool_ops sc92031_ethtool_ops = {
+	.get_settings		= sc92031_ethtool_get_settings,
+	.set_settings		= sc92031_ethtool_set_settings,
+	.get_drvinfo		= sc92031_ethtool_get_drvinfo,
+	.get_wol		= sc92031_ethtool_get_wol,
+	.set_wol		= sc92031_ethtool_set_wol,
+	.nway_reset		= sc92031_ethtool_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.get_sg			= ethtool_op_get_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.get_strings		= sc92031_ethtool_get_strings,
+	.get_stats_count	= sc92031_ethtool_get_stats_count,
+	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
+	.get_perm_addr		= ethtool_op_get_perm_addr,
+	.get_ufo		= ethtool_op_get_ufo,
+};
+
+static int __devinit sc92031_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	int err;
+	void __iomem* port_base;
+	struct net_device *dev;
+	struct sc92031_priv *priv;
+	u32 mac0, mac1;
+
+	err = pci_enable_device(pdev);
+	if (unlikely(err < 0))
+		goto out_enable_device;
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (unlikely(err < 0))
+		goto out_set_dma_mask;
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (unlikely(err < 0))
+		goto out_set_dma_mask;
+
+	err = pci_request_regions(pdev, SC92031_NAME);
+	if (unlikely(err < 0))
+		goto out_request_regions;
+
+	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
+	if (unlikely(!port_base)) {
+		err = -EIO;
+		goto out_iomap;
+	}
+
+	dev = alloc_etherdev(sizeof(struct sc92031_priv));
+	if (unlikely(!dev)) {
+		err = -ENOMEM;
+		goto out_alloc_etherdev;
+	}
+
+	pci_set_drvdata(pdev, dev);
+
+#if SC92031_USE_BAR == 0
+	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
+	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
+#elif SC92031_USE_BAR == 1
+	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
+#endif
+	dev->irq = pdev->irq;
+
+	/* faked with skb_copy_and_csum_dev */
+	dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+
+	dev->get_stats		= sc92031_get_stats;
+	dev->ethtool_ops	= &sc92031_ethtool_ops;
+	dev->hard_start_xmit	= sc92031_start_xmit;
+	dev->watchdog_timeo	= TX_TIMEOUT;
+	dev->open		= sc92031_open;
+	dev->stop		= sc92031_stop;
+	dev->set_multicast_list	= sc92031_set_multicast_list;
+	dev->tx_timeout		= sc92031_tx_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller	= sc92031_poll_controller;
+#endif
+
+	priv = netdev_priv(dev);
+	spin_lock_init(&priv->lock);
+	priv->port_base = port_base;
+	priv->pdev = pdev;
+	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
+	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
+	 * sc92031_open will work correctly */
+	tasklet_disable_nosync(&priv->tasklet);
+
+	/* PCI PM Wakeup */
+	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
+
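+	/* the MAC address is stored big-endian across MAC0 and MAC0+4:
+	 * mac0 holds bytes 0-3, the low half of mac1 holds bytes 4-5 */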
+	mac0 = ioread32(port_base + MAC0);
+	mac1 = ioread32(port_base + MAC0 + 4);
+	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
+	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
+	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
+	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
+	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
+	dev->dev_addr[5] = dev->perm_addr[5] = mac1;
+
+	err = register_netdev(dev);
+	if (err < 0)
+		goto out_register_netdev;
+
+	return 0;
+
+out_register_netdev:
+	free_netdev(dev);
+out_alloc_etherdev:
+	pci_iounmap(pdev, port_base);
+out_iomap:
+	pci_release_regions(pdev);
+out_request_regions:
+out_set_dma_mask:
+	pci_disable_device(pdev);
+out_enable_device:
+	return err;
+}
+
+static void __devexit sc92031_remove(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sc92031_priv *priv = netdev_priv(dev);
+	void __iomem* port_base = priv->port_base;
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+	pci_iounmap(pdev, port_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	pci_save_state(pdev);
+
+	if (!netif_running(dev))
+		goto out;
+
+	netif_device_detach(dev);
+
+	/* Disable interrupts, stop Tx and Rx. */
+	sc92031_disable_interrupts(dev);
+
+	spin_lock(&priv->lock);
+
+	_sc92031_disable_tx_rx(dev);
+	_sc92031_tx_clear(dev);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+
+out:
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int sc92031_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sc92031_priv *priv = netdev_priv(dev);
+
+	pci_restore_state(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+
+	if (!netif_running(dev))
+		goto out;
+
+	/* Interrupts already disabled by sc92031_suspend */
+	spin_lock(&priv->lock);
+
+	_sc92031_reset(dev);
+	mmiowb();
+
+	spin_unlock(&priv->lock);
+	sc92031_enable_interrupts(dev);
+
+	netif_device_attach(dev);
+
+	if (netif_carrier_ok(dev))
+		netif_wake_queue(dev);
+	else
+		netif_tx_disable(dev);
+
+out:
+	return 0;
+}
+
+static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
+
+static struct pci_driver sc92031_pci_driver = {
+	.name		= SC92031_NAME,
+	.id_table	= sc92031_pci_device_id_table,
+	.probe		= sc92031_probe,
+	.remove		= __devexit_p(sc92031_remove),
+	.suspend	= sc92031_suspend,
+	.resume		= sc92031_resume,
+};
+
+static int __init sc92031_init(void)
+{
+	printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
+	return pci_register_driver(&sc92031_pci_driver);
+}
+
+static void __exit sc92031_exit(void)
+{
+	pci_unregister_driver(&sc92031_pci_driver);
+}
+
+module_init(sc92031_init);
+module_exit(sc92031_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
+MODULE_DESCRIPTION(SC92031_DESCRIPTION);
+MODULE_VERSION(SC92031_VERSION);

+ 0 - 1216
drivers/net/sk_mca.c

@@ -1,1216 +0,0 @@
-/*
-net-3-driver for the SKNET MCA-based cards
-
-This is an extension to the Linux operating system, and is covered by the
-same GNU General Public License that covers that work.
-
-Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
-                                 alfred.arnold@lancom.de)
-
-This driver is based both on the 3C523 driver and the SK_G16 driver.
-
-paper sources:
-  'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
-  Hans-Peter Messmer for the basic Microchannel stuff
-
-  'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
-  for help on Ethernet driver programming
-
-  'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
-  for documentation on the AM7990 LANCE
-
-  'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
-  for documentation on the Junior board
-
-  'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
-  documentation on the MC2 board
-
-  A big thank you to the S&K support for providing me so quickly with
-  documentation!
-
-  Also see http://www.syskonnect.com/
-
-  Missing things:
-
-  -> set debug level via ioctl instead of compile-time switches
-  -> I didn't follow the development of the 2.1.x kernels, so my
-     assumptions about which things changed with which kernel version
-     are probably nonsense
-
-History:
-  May 16th, 1999
-  	startup
-  May 22nd, 1999
-	added private structure, methods
-        began building data structures in RAM
-  May 23rd, 1999
-	can receive frames, send frames
-  May 24th, 1999
-        modularized initialization of LANCE
-        loadable as module
-	still Tx problem :-(
-  May 26th, 1999
-  	MC2 works
-  	support for multiple devices
-  	display media type for MC2+
-  May 28th, 1999
-	fixed problem in GetLANCE leaving interrupts turned off
-        increase TX queue to 4 packets to improve send performance
-  May 29th, 1999
-	a few corrections in statistics, caught rcvr overruns
-        reinitialization of LANCE/board in critical situations
-        MCA info implemented
-	implemented LANCE multicast filter
-  Jun 6th, 1999
-	additions for Linux 2.2
-  Dec 25th, 1999
-  	unfortunately there seem to be newer MC2+ boards that react
-  	to IRQ 3/5/9/10 instead of 3/5/10/11, so we have to autoprobe
-  	in questionable cases...
-  Dec 28th, 1999
-	integrated patches from David Weinehall & Bill Wendling for 2.3
-	kernels (isa_...functions).  Things are defined in a way that
-        it still works with 2.0.x 8-)
-  Dec 30th, 1999
-	added handling of the remaining interrupt conditions.  That
-        should cure the spurious hangs.
-  Jan 30th, 2000
-	newer kernels automatically probe more than one board, so the
-	'startslot' as a variable is also needed here
-  June 1st, 2000
-	added changes for recent 2.3 kernels
-
- *************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/mca-legacy.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-
-#define _SK_MCA_DRIVER_
-#include "sk_mca.h"
-
-/* ------------------------------------------------------------------------
- * global static data - not more since we can handle multiple boards and
- * have to pack all state info into the device struct!
- * ------------------------------------------------------------------------ */
-
-static char *MediaNames[Media_Count] =
-    { "10Base2", "10BaseT", "10Base5", "Unknown" };
-
-static unsigned char poly[] =
-    { 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
-	1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0
-};
-
-/* ------------------------------------------------------------------------
- * private subfunctions
- * ------------------------------------------------------------------------ */
-
-/* dump parts of shared memory - only needed during debugging */
-
-#ifdef DEBUG
-static void dumpmem(struct net_device *dev, u32 start, u32 len)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	int z;
-
-	for (z = 0; z < len; z++) {
-		if ((z & 15) == 0)
-			printk("%04x:", z);
-		printk(" %02x", readb(priv->base + start + z));
-		if ((z & 15) == 15)
-			printk("\n");
-	}
-}
-
-/* print exact time - ditto */
-
-static void PrTime(void)
-{
-	struct timeval tv;
-
-	do_gettimeofday(&tv);
-	printk("%9d:%06d: ", tv.tv_sec, tv.tv_usec);
-}
-#endif
-
-/* deduce resources out of POS registers */
-
-static void __init getaddrs(int slot, int junior, int *base, int *irq,
-		     skmca_medium * medium)
-{
-	u_char pos0, pos1, pos2;
-
-	if (junior) {
-		pos0 = mca_read_stored_pos(slot, 2);
-		*base = ((pos0 & 0x0e) << 13) + 0xc0000;
-		*irq = ((pos0 & 0x10) >> 4) + 10;
-		*medium = Media_Unknown;
-	} else {
-		/* reset POS 104 Bits 0+1 so the shared memory region goes to the
-		   configured area between 640K and 1M.  Afterwards, enable the MC2.
-		   I really don't know what possessed SK to do this... */
-
-		mca_write_pos(slot, 4,
-			      mca_read_stored_pos(slot, 4) & 0xfc);
-		mca_write_pos(slot, 2,
-			      mca_read_stored_pos(slot, 2) | 0x01);
-
-		pos1 = mca_read_stored_pos(slot, 3);
-		pos2 = mca_read_stored_pos(slot, 4);
-		*base = ((pos1 & 0x07) << 14) + 0xc0000;
-		switch (pos2 & 0x0c) {
-		case 0:
-			*irq = 3;
-			break;
-		case 4:
-			*irq = 5;
-			break;
-		case 8:
-			*irq = -10;
-			break;
-		case 12:
-			*irq = -11;
-			break;
-		}
-		*medium = (pos2 >> 6) & 3;
-	}
-}
-
-/* check for both cards:
-   When the MC2 is turned off, it was configured for more than 15MB RAM,
-   is disabled and won't get detected using the standard probe.  We
-   therefore have to scan the slots manually :-( */
-
-static int __init dofind(int *junior, int firstslot)
-{
-	int slot;
-	unsigned int id;
-
-	for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++) {
-		id = mca_read_stored_pos(slot, 0)
-		    + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
-
-		*junior = 0;
-		if (id == SKNET_MCA_ID)
-			return slot;
-		*junior = 1;
-		if (id == SKNET_JUNIOR_MCA_ID)
-			return slot;
-	}
-	return MCA_NOTFOUND;
-}
-
-/* reset the whole board */
-
-static void ResetBoard(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-
-	writeb(CTRL_RESET_ON, priv->ctrladdr);
-	udelay(10);
-	writeb(CTRL_RESET_OFF, priv->ctrladdr);
-}
-
-/* wait for LANCE interface to become not busy */
-
-static int WaitLANCE(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	int t = 0;
-
-	while ((readb(priv->ctrladdr) & STAT_IO_BUSY) ==
-	       STAT_IO_BUSY) {
-		udelay(1);
-		if (++t > 1000) {
-			printk("%s: LANCE access timeout", dev->name);
-			return 0;
-		}
-	}
-
-	return 1;
-}
-
-/* set LANCE register - must be atomic */
-
-static void SetLANCE(struct net_device *dev, u16 addr, u16 value)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-
-	/* disable interrupts */
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* wait until no transfer is pending */
-
-	WaitLANCE(dev);
-
-	/* transfer register address to RAP */
-
-	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
-	writew(addr, priv->ioregaddr);
-	writeb(IOCMD_GO, priv->cmdaddr);
-	udelay(1);
-	WaitLANCE(dev);
-
-	/* transfer data to register */
-
-	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
-	writew(value, priv->ioregaddr);
-	writeb(IOCMD_GO, priv->cmdaddr);
-	udelay(1);
-	WaitLANCE(dev);
-
-	/* reenable interrupts */
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* get LANCE register */
-
-static u16 GetLANCE(struct net_device *dev, u16 addr)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-	unsigned int res;
-
-	/* disable interrupts */
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* wait until no transfer is pending */
-
-	WaitLANCE(dev);
-
-	/* transfer register address to RAP */
-
-	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
-	writew(addr, priv->ioregaddr);
-	writeb(IOCMD_GO, priv->cmdaddr);
-	udelay(1);
-	WaitLANCE(dev);
-
-	/* transfer data from register */
-
-	writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
-	writeb(IOCMD_GO, priv->cmdaddr);
-	udelay(1);
-	WaitLANCE(dev);
-	res = readw(priv->ioregaddr);
-
-	/* reenable interrupts */
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return res;
-}
-
-/* build up descriptors in shared RAM */
-
-static void InitDscrs(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	u32 bufaddr;
-
-	/* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
-	   are always 0. */
-
-	bufaddr = RAM_DATABASE;
-	{
-		LANCE_TxDescr descr;
-		int z;
-
-		for (z = 0; z < TXCOUNT; z++) {
-			descr.LowAddr = bufaddr;
-			descr.Flags = 0;
-			descr.Len = 0xf000;
-			descr.Status = 0;
-			memcpy_toio(priv->base + RAM_TXBASE +
-				   (z * sizeof(LANCE_TxDescr)), &descr,
-				   sizeof(LANCE_TxDescr));
-			memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
-			bufaddr += RAM_BUFSIZE;
-		}
-	}
-
-	/* do the same for the Rx descriptors */
-
-	{
-		LANCE_RxDescr descr;
-		int z;
-
-		for (z = 0; z < RXCOUNT; z++) {
-			descr.LowAddr = bufaddr;
-			descr.Flags = RXDSCR_FLAGS_OWN;
-			descr.MaxLen = -RAM_BUFSIZE;
-			descr.Len = 0;
-			memcpy_toio(priv->base + RAM_RXBASE +
-				   (z * sizeof(LANCE_RxDescr)), &descr,
-				   sizeof(LANCE_RxDescr));
-			memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
-			bufaddr += RAM_BUFSIZE;
-		}
-	}
-}
-
-/* calculate the hash bit position for a given multicast address
-   taken more or less directly from the AMD datasheet... */
-
-static void UpdateCRC(unsigned char *CRC, int bit)
-{
-	int j;
-
-	/* shift CRC one bit */
-
-	memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
-	CRC[0] = 0;
-
-	/* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
-
-	if (bit ^ CRC[32])
-		for (j = 0; j < 32; j++)
-			CRC[j] ^= poly[j];
-}
-
-static unsigned int GetHash(char *address)
-{
-	unsigned char CRC[33];
-	int i, byte, hashcode;
-
-	/* a multicast address has bit 0 in the first byte set */
-
-	if ((address[0] & 1) == 0)
-		return -1;
-
-	/* initialize CRC */
-
-	memset(CRC, 1, sizeof(CRC));
-
-	/* loop through address bits */
-
-	for (byte = 0; byte < 6; byte++)
-		for (i = 0; i < 8; i++)
-			UpdateCRC(CRC, (address[byte] >> i) & 1);
-
-	/* hashcode is the 6 least significant bits of the CRC */
-
-	hashcode = 0;
-	for (i = 0; i < 6; i++)
-		hashcode = (hashcode << 1) + CRC[i];
-	return hashcode;
-}
-
-/* feed ready-built initialization block into LANCE */
-
-static void InitLANCE(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-
-	/* build up descriptors. */
-
-	InitDscrs(dev);
-
-	/* next RX descriptor to be read is the first one.  Since the LANCE
-	   will start from the beginning after initialization, we have to
-	   reset our pointers too. */
-
-	priv->nextrx = 0;
-
-	/* no TX descriptors active */
-
-	priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;
-
-	/* set up the LANCE bus control register - constant for SKnet boards */
-
-	SetLANCE(dev, LANCE_CSR3,
-		 CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);
-
-	/* write address of initialization block into LANCE */
-
-	SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
-	SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);
-
-	/* we don't get ready until the LANCE has read the init block */
-
-	netif_stop_queue(dev);
-
-	/* let LANCE read the initialization block.  LANCE is ready
-	   when we receive the corresponding interrupt. */
-
-	SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
-}
-
-/* stop the LANCE so we can reinitialize it */
-
-static void StopLANCE(struct net_device *dev)
-{
-	/* can't take frames any more */
-
-	netif_stop_queue(dev);
-
-	/* disable interrupts, stop it */
-
-	SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
-}
-
-/* initialize card and LANCE for proper operation */
-
-static void InitBoard(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	LANCE_InitBlock block;
-
-	/* Lay out the shared RAM - first we create the init block for the LANCE.
-	   We do not overwrite it later because we need it again when we switch
-	   promiscuous mode on/off. */
-
-	block.Mode = 0;
-	if (dev->flags & IFF_PROMISC)
-		block.Mode |= LANCE_INIT_PROM;
-	memcpy(block.PAdr, dev->dev_addr, 6);
-	memset(block.LAdrF, 0, sizeof(block.LAdrF));
-	block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
-	block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);
-
-	memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
-
-	/* initialize LANCE. Implicitly sets up other structures in RAM. */
-
-	InitLANCE(dev);
-}
-
-/* deinitialize card and LANCE */
-
-static void DeinitBoard(struct net_device *dev)
-{
-	/* stop LANCE */
-
-	StopLANCE(dev);
-
-	/* reset board */
-
-	ResetBoard(dev);
-}
-
-/* probe for device's irq */
-
-static int __init ProbeIRQ(struct net_device *dev)
-{
-	unsigned long imaskval, njiffies, irq;
-	u16 csr0val;
-
-	/* enable all interrupts */
-
-	imaskval = probe_irq_on();
-
-	/* initialize the board. Wait for interrupt 'Initialization done'. */
-
-	ResetBoard(dev);
-	InitBoard(dev);
-
-	njiffies = jiffies + HZ;
-	do {
-		csr0val = GetLANCE(dev, LANCE_CSR0);
-	}
-	while (((csr0val & CSR0_IDON) == 0) && (jiffies != njiffies));
-
-	/* turn off interrupts again */
-
-	irq = probe_irq_off(imaskval);
-
-	/* if we found something, ack the interrupt */
-
-	if (irq)
-		SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_IDON);
-
-	/* back to idle state */
-
-	DeinitBoard(dev);
-
-	return irq;
-}
-
-/* ------------------------------------------------------------------------
- * interrupt handler(s)
- * ------------------------------------------------------------------------ */
-
-/* LANCE has read initialization block -> start it */
-
-static u16 irqstart_handler(struct net_device *dev, u16 oldcsr0)
-{
-	/* now we're ready to transmit */
-
-	netif_wake_queue(dev);
-
-	/* reset IDON bit, start LANCE */
-
-	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
-	return GetLANCE(dev, LANCE_CSR0);
-}
-
-/* did we lose blocks due to a FIFO overrun ? */
-
-static u16 irqmiss_handler(struct net_device *dev, u16 oldcsr0)
-{
-	skmca_priv *priv = netdev_priv(dev);
-
-	/* update statistics */
-
-	priv->stat.rx_fifo_errors++;
-
-	/* reset MISS bit */
-
-	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_MISS);
-	return GetLANCE(dev, LANCE_CSR0);
-}
-
-/* receive interrupt */
-
-static u16 irqrx_handler(struct net_device *dev, u16 oldcsr0)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	LANCE_RxDescr descr;
-	unsigned int descraddr;
-
-	/* run through queue until we reach a descriptor we do not own */
-
-	descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
-	while (1) {
-		/* read descriptor */
-		memcpy_fromio(&descr, priv->base + descraddr,
-			     sizeof(LANCE_RxDescr));
-
-		/* if we reach a descriptor we do not own, we're done */
-		if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
-			break;
-
-#ifdef DEBUG
-		PrTime();
-		printk("Receive packet on descr %d len %d\n", priv->nextrx,
-		       descr.Len);
-#endif
-
-		/* erroneous packet ? */
-		if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0) {
-			priv->stat.rx_errors++;
-			if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
-				priv->stat.rx_crc_errors++;
-			else if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
-				priv->stat.rx_frame_errors++;
-			else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
-				priv->stat.rx_fifo_errors++;
-		}
-
-		/* good packet ? */
-		else {
-			struct sk_buff *skb;
-
-			skb = dev_alloc_skb(descr.Len + 2);
-			if (skb == NULL)
-				priv->stat.rx_dropped++;
-			else {
-				memcpy_fromio(skb_put(skb, descr.Len),
-					     priv->base +
-					     descr.LowAddr, descr.Len);
-				skb->dev = dev;
-				skb->protocol = eth_type_trans(skb, dev);
-				skb->ip_summed = CHECKSUM_NONE;
-				priv->stat.rx_packets++;
-				priv->stat.rx_bytes += descr.Len;
-				netif_rx(skb);
-				dev->last_rx = jiffies;
-			}
-		}
-
-		/* give descriptor back to LANCE */
-		descr.Len = 0;
-		descr.Flags |= RXDSCR_FLAGS_OWN;
-
-		/* update descriptor in shared RAM */
-		memcpy_toio(priv->base + descraddr, &descr,
-			   sizeof(LANCE_RxDescr));
-
-		/* go to next descriptor */
-		priv->nextrx++;
-		descraddr += sizeof(LANCE_RxDescr);
-		if (priv->nextrx >= RXCOUNT) {
-			priv->nextrx = 0;
-			descraddr = RAM_RXBASE;
-		}
-	}
-
-	/* reset RINT bit */
-
-	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
-	return GetLANCE(dev, LANCE_CSR0);
-}
-
-/* transmit interrupt */
-
-static u16 irqtx_handler(struct net_device *dev, u16 oldcsr0)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	LANCE_TxDescr descr;
-	unsigned int descraddr;
-
-	/* check descriptors at most until no busy one is left */
-
-	descraddr =
-	    RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
-	while (priv->txbusy > 0) {
-		/* read descriptor */
-		memcpy_fromio(&descr, priv->base + descraddr,
-			     sizeof(LANCE_TxDescr));
-
-		/* if the LANCE still owns this one, we've worked out all sent packets */
-		if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
-			break;
-
-#ifdef DEBUG
-		PrTime();
-		printk("Send packet done on descr %d\n", priv->nexttxdone);
-#endif
-
-		/* update statistics */
-		if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0) {
-			priv->stat.tx_packets++;
-			priv->stat.tx_bytes++;
-		} else {
-			priv->stat.tx_errors++;
-			if ((descr.Status & TXDSCR_STATUS_UFLO) != 0) {
-				priv->stat.tx_fifo_errors++;
-				InitLANCE(dev);
-			} else if ((descr.Status & TXDSCR_STATUS_LCOL) != 0)
-				priv->stat.tx_window_errors++;
-			else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
-				priv->stat.tx_carrier_errors++;
-			else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
-				priv->stat.tx_aborted_errors++;
-		}
-
-		/* go to next descriptor */
-		priv->nexttxdone++;
-		descraddr += sizeof(LANCE_TxDescr);
-		if (priv->nexttxdone >= TXCOUNT) {
-			priv->nexttxdone = 0;
-			descraddr = RAM_TXBASE;
-		}
-		priv->txbusy--;
-	}
-
-	/* reset TX interrupt bit */
-
-	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
-	oldcsr0 = GetLANCE(dev, LANCE_CSR0);
-
-	/* at least one descriptor is freed.  Therefore we can accept
-	   a new one */
-	/* inform upper layers we're in business again */
-
-	netif_wake_queue(dev);
-
-	return oldcsr0;
-}
-
-/* general interrupt entry */
-
-static irqreturn_t irq_handler(int irq, void *device)
-{
-	struct net_device *dev = (struct net_device *) device;
-	u16 csr0val;
-
-	/* read CSR0 to get interrupt cause */
-
-	csr0val = GetLANCE(dev, LANCE_CSR0);
-
-	/* in case we're not meant... */
-
-	if ((csr0val & CSR0_INTR) == 0)
-		return IRQ_NONE;
-
-#if 0
-	set_bit(LINK_STATE_RXSEM, &dev->state);
-#endif
-
-	/* loop through the interrupt bits until everything is clear */
-
-	do {
-		if ((csr0val & CSR0_IDON) != 0)
-			csr0val = irqstart_handler(dev, csr0val);
-		if ((csr0val & CSR0_RINT) != 0)
-			csr0val = irqrx_handler(dev, csr0val);
-		if ((csr0val & CSR0_MISS) != 0)
-			csr0val = irqmiss_handler(dev, csr0val);
-		if ((csr0val & CSR0_TINT) != 0)
-			csr0val = irqtx_handler(dev, csr0val);
-		if ((csr0val & CSR0_MERR) != 0) {
-			SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_MERR);
-			csr0val = GetLANCE(dev, LANCE_CSR0);
-		}
-		if ((csr0val & CSR0_BABL) != 0) {
-			SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_BABL);
-			csr0val = GetLANCE(dev, LANCE_CSR0);
-		}
-	}
-	while ((csr0val & CSR0_INTR) != 0);
-
-#if 0
-	clear_bit(LINK_STATE_RXSEM, &dev->state);
-#endif
-	return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------------------
- * driver methods
- * ------------------------------------------------------------------------ */
-
-/* MCA info */
-
-static int skmca_getinfo(char *buf, int slot, void *d)
-{
-	int len = 0, i;
-	struct net_device *dev = (struct net_device *) d;
-	skmca_priv *priv;
-
-	/* can't say anything about an uninitialized device... */
-
-	if (dev == NULL)
-		return len;
-	priv = netdev_priv(dev);
-
-	/* print info */
-
-	len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
-	len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
-		       dev->mem_end - 1);
-	len +=
-	    sprintf(buf + len, "Transceiver: %s\n",
-		    MediaNames[priv->medium]);
-	len += sprintf(buf + len, "Device: %s\n", dev->name);
-	len += sprintf(buf + len, "MAC address:");
-	for (i = 0; i < 6; i++)
-		len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
-	buf[len++] = '\n';
-	buf[len] = 0;
-
-	return len;
-}
-
-/* open driver.  Means also initialization and start of LANCE */
-
-static int skmca_open(struct net_device *dev)
-{
-	int result;
-	skmca_priv *priv = netdev_priv(dev);
-
-	/* register resources - only necessary for IRQ */
-	result =
-	    request_irq(priv->realirq, irq_handler,
-			IRQF_SHARED | IRQF_SAMPLE_RANDOM, "sk_mca", dev);
-	if (result != 0) {
-		printk("%s: failed to register irq %d\n", dev->name,
-		       dev->irq);
-		return result;
-	}
-	dev->irq = priv->realirq;
-
-	/* set up the card and LANCE */
-
-	InitBoard(dev);
-
-	/* set up flags */
-
-	netif_start_queue(dev);
-
-	return 0;
-}
-
-/* close driver.  Shut down board and free allocated resources */
-
-static int skmca_close(struct net_device *dev)
-{
-	/* turn off board */
-	DeinitBoard(dev);
-
-	/* release resources */
-	if (dev->irq != 0)
-		free_irq(dev->irq, dev);
-	dev->irq = 0;
-
-	return 0;
-}
-
-/* transmit a block. */
-
-static int skmca_tx(struct sk_buff *skb, struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	LANCE_TxDescr descr;
-	unsigned int address;
-	int tmplen, retval = 0;
-	unsigned long flags;
-
-	/* if we get called with a NULL descriptor, the Ethernet layer thinks
-	   our card is stuck and we should reset it.  We'll do this completely: */
-
-	if (skb == NULL) {
-		DeinitBoard(dev);
-		InitBoard(dev);
-		return 0;	/* don't try to free the block here ;-) */
-	}
-
-	/* is there space in the Tx queue?  If not, the upper layer gave us a
-	   packet in spite of us not being ready and is really in trouble.
-	   We'll do the dropping for it: */
-	if (priv->txbusy >= TXCOUNT) {
-		priv->stat.tx_dropped++;
-		retval = -EIO;
-		goto tx_done;
-	}
-
-	/* get TX descriptor */
-	address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
-	memcpy_fromio(&descr, priv->base + address, sizeof(LANCE_TxDescr));
-
-	/* enter packet length as 2s complement - assure minimum length */
-	tmplen = skb->len;
-	if (tmplen < 60)
-		tmplen = 60;
-	descr.Len = 65536 - tmplen;
-
-	/* copy filler into RAM - in case we're filling up...
-	   we're filling a bit more than necessary, but that does no harm
-	   since the buffer is far larger... */
-	if (tmplen > skb->len) {
-		char *fill = "NetBSD is a nice OS too! ";
-		unsigned int destoffs = 0, l = strlen(fill);
-
-		while (destoffs < tmplen) {
-			memcpy_toio(priv->base + descr.LowAddr +
-				   destoffs, fill, l);
-			destoffs += l;
-		}
-	}
-
-	/* do the real data copying */
-	memcpy_toio(priv->base + descr.LowAddr, skb->data, skb->len);
-
-	/* hand descriptor over to LANCE - this is the first and last chunk */
-	descr.Flags =
-	    TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
-
-#ifdef DEBUG
-	PrTime();
-	printk("Send packet on descr %d len %d\n", priv->nexttxput,
-	       skb->len);
-#endif
-
-	/* one more descriptor busy */
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	priv->nexttxput++;
-	if (priv->nexttxput >= TXCOUNT)
-		priv->nexttxput = 0;
-	priv->txbusy++;
-
-	/* are we saturated ? */
-
-	if (priv->txbusy >= TXCOUNT)
-		netif_stop_queue(dev);
-
-	/* write descriptor back to RAM */
-	memcpy_toio(priv->base + address, &descr, sizeof(LANCE_TxDescr));
-
-	/* if no descriptors were active, give the LANCE a hint to read it
-	   immediately */
-
-	if (priv->txbusy == 0)
-		SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-      tx_done:
-
-	dev_kfree_skb(skb);
-
-	return retval;
-}
-
-/* return pointer to Ethernet statistics */
-
-static struct net_device_stats *skmca_stats(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-
-	return &(priv->stat);
-}
-
-/* switch receiver mode.  We use the LANCE's multicast filter to prefilter
-   multicast addresses. */
-
-static void skmca_set_multicast_list(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	LANCE_InitBlock block;
-
-	/* first stop the LANCE... */
-	StopLANCE(dev);
-
-	/* ...then modify the initialization block... */
-	memcpy_fromio(&block, priv->base + RAM_INITBASE, sizeof(block));
-	if (dev->flags & IFF_PROMISC)
-		block.Mode |= LANCE_INIT_PROM;
-	else
-		block.Mode &= ~LANCE_INIT_PROM;
-
-	if (dev->flags & IFF_ALLMULTI) {	/* get all multicasts */
-		memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
-	} else {		/* get selected/no multicasts */
-
-		struct dev_mc_list *mptr;
-		int code;
-
-		memset(block.LAdrF, 0, sizeof(block.LAdrF));
-		for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next) {
-			code = GetHash(mptr->dmi_addr);
-			block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
-		}
-	}
-
-	memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
-
-	/* ...then reinit LANCE with the correct flags */
-	InitLANCE(dev);
-}
-
-/* ------------------------------------------------------------------------
- * hardware check
- * ------------------------------------------------------------------------ */
-
-static int startslot;		/* counts through slots when probing multiple devices */
-
-static void cleanup_card(struct net_device *dev)
-{
-	skmca_priv *priv = netdev_priv(dev);
-	DeinitBoard(dev);
-	if (dev->irq != 0)
-		free_irq(dev->irq, dev);
-	iounmap(priv->base);
-	mca_mark_as_unused(priv->slot);
-	mca_set_adapter_procfn(priv->slot, NULL, NULL);
-}
-
-struct net_device * __init skmca_probe(int unit)
-{
-	struct net_device *dev;
-	int force_detect = 0;
-	int junior, slot, i;
-	int base = 0, irq = 0;
-	skmca_priv *priv;
-	skmca_medium medium;
-	int err;
-
-	/* can't work without an MCA bus ;-) */
-
-	if (MCA_bus == 0)
-		return ERR_PTR(-ENODEV);
-
-	dev = alloc_etherdev(sizeof(skmca_priv));
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (unit >= 0) {
-		sprintf(dev->name, "eth%d", unit);
-		netdev_boot_setup_check(dev);
-	}
-
-	SET_MODULE_OWNER(dev);
-
-	/* start address of 1 --> forced detection */
-
-	if (dev->mem_start == 1)
-		force_detect = 1;
-
-	/* search through slots */
-
-	base = dev->mem_start;
-	irq = dev->base_addr;
-	for (slot = startslot; (slot = dofind(&junior, slot)) != -1; slot++) {
-		/* deduce card addresses */
-
-		getaddrs(slot, junior, &base, &irq, &medium);
-
-		/* slot already in use ? */
-
-		if (mca_is_adapter_used(slot))
-			continue;
-
-		/* were we looking for something different ? */
-
-		if (dev->irq && dev->irq != irq)
-			continue;
-		if (dev->mem_start && dev->mem_start != base)
-			continue;
-
-		/* found something that matches */
-
-		break;
-	}
-
-	/* nothing found ? */
-
-	if (slot == -1) {
-		free_netdev(dev);
-		return (base || irq) ? ERR_PTR(-ENXIO) : ERR_PTR(-ENODEV);
-	}
-
-	/* make procfs entries */
-
-	if (junior)
-		mca_set_adapter_name(slot,
-				     "SKNET junior MC2 Ethernet Adapter");
-	else
-		mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
-	mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
-
-	mca_mark_as_used(slot);
-
-	/* announce success */
-	printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
-	       junior ? "Junior MC2" : "MC2+", slot + 1);
-
-	priv = netdev_priv(dev);
-	priv->base = ioremap(base, 0x4000);
-	if (!priv->base) {
-		mca_set_adapter_procfn(slot, NULL, NULL);
-		mca_mark_as_unused(slot);
-		free_netdev(dev);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	priv->slot = slot;
-	priv->macbase = priv->base + 0x3fc0;
-	priv->ioregaddr = priv->base + 0x3ff0;
-	priv->ctrladdr = priv->base + 0x3ff2;
-	priv->cmdaddr = priv->base + 0x3ff3;
-	priv->medium = medium;
-	memset(&priv->stat, 0, sizeof(struct net_device_stats));
-	spin_lock_init(&priv->lock);
-
-	/* set base + irq for this device (irq not allocated so far) */
-	dev->irq = 0;
-	dev->mem_start = base;
-	dev->mem_end = base + 0x4000;
-
-	/* autoprobe ? */
-	if (irq < 0) {
-		int nirq;
-
-		printk
-		    ("%s: ambiguous POS bit combination, must probe for IRQ...\n",
-		     dev->name);
-		nirq = ProbeIRQ(dev);
-		if (nirq <= 0)
-			printk("%s: IRQ probe failed, assuming IRQ %d",
-			       dev->name, priv->realirq = -irq);
-		else
-			priv->realirq = nirq;
-	} else
-		priv->realirq = irq;
-
-	/* set methods */
-	dev->open = skmca_open;
-	dev->stop = skmca_close;
-	dev->hard_start_xmit = skmca_tx;
-	dev->do_ioctl = NULL;
-	dev->get_stats = skmca_stats;
-	dev->set_multicast_list = skmca_set_multicast_list;
-	dev->flags |= IFF_MULTICAST;
-
-	/* copy out MAC address */
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = readb(priv->macbase + (i << 1));
-
-	/* print config */
-	printk("%s: IRQ %d, memory %#lx-%#lx, "
-	       "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
-	       dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
-	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
-	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
-	printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
-
-	/* reset board */
-
-	ResetBoard(dev);
-
-	startslot = slot + 1;
-
-	err = register_netdev(dev);
-	if (err) {
-		cleanup_card(dev);
-		free_netdev(dev);
-		dev = ERR_PTR(err);
-	}
-	return dev;
-}
-
-/* ------------------------------------------------------------------------
- * modularization support
- * ------------------------------------------------------------------------ */
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-#define DEVMAX 5
-
-static struct net_device *moddevs[DEVMAX];
-
-int init_module(void)
-{
-	int z;
-
-	startslot = 0;
-	for (z = 0; z < DEVMAX; z++) {
-		struct net_device *dev = skmca_probe(-1);
-		if (IS_ERR(dev))
-			break;
-		moddevs[z] = dev;
-	}
-	if (!z)
-		return -EIO;
-	return 0;
-}
-
-void cleanup_module(void)
-{
-	int z;
-
-	for (z = 0; z < DEVMAX; z++) {
-		struct net_device *dev = moddevs[z];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-#endif				/* MODULE */

+ 0 - 170
drivers/net/sk_mca.h

@@ -1,170 +0,0 @@
-#ifndef _SK_MCA_INCLUDE_
-#define _SK_MCA_INCLUDE_
-
-#ifdef _SK_MCA_DRIVER_
-
-/* Adapter ID's */
-#define SKNET_MCA_ID 0x6afd
-#define SKNET_JUNIOR_MCA_ID 0x6be9
-
-/* media enumeration - defined in a way that it fits onto the MC2+'s
-   POS registers... */
-
-typedef enum { Media_10Base2, Media_10BaseT,
-	Media_10Base5, Media_Unknown, Media_Count
-} skmca_medium;
-
-/* private structure */
-typedef struct {
-	unsigned int slot;	/* MCA-Slot-#                       */
-	void __iomem *base;
-	void __iomem *macbase;	/* base address of MAC address PROM */
-	void __iomem *ioregaddr;/* address of I/O-register (Lo)     */
-	void __iomem *ctrladdr;	/* address of control/stat register */
-	void __iomem *cmdaddr;	/* address of I/O-command register  */
-	int nextrx;		/* index of next RX descriptor to
-				   be read                          */
-	int nexttxput;		/* index of next free TX descriptor */
-	int nexttxdone;		/* index of next TX descriptor to
-				   be finished                      */
-	int txbusy;		/* # of busy TX descriptors         */
-	struct net_device_stats stat;	/* packet statistics            */
-	int realirq;		/* memorizes actual IRQ, even when
-				   currently not allocated          */
-	skmca_medium medium;	/* physical connector               */
-	spinlock_t lock;
-} skmca_priv;
-
-/* card registers: control/status register bits */
-
-#define CTRL_ADR_DATA      0	/* Bit 0 = 0 ->access data register  */
-#define CTRL_ADR_RAP       1	/* Bit 0 = 1 ->access RAP register   */
-#define CTRL_RW_WRITE      0	/* Bit 1 = 0 ->write register        */
-#define CTRL_RW_READ       2	/* Bit 1 = 1 ->read register         */
-#define CTRL_RESET_ON      0	/* Bit 3 = 0 ->reset board           */
-#define CTRL_RESET_OFF     8	/* Bit 3 = 1 ->no reset of board     */
-
-#define STAT_ADR_DATA      0	/* Bit 0 of ctrl register read back  */
-#define STAT_ADR_RAP       1
-#define STAT_RW_WRITE      0	/* Bit 1 of ctrl register read back  */
-#define STAT_RW_READ       2
-#define STAT_RESET_ON      0	/* Bit 3 of ctrl register read back  */
-#define STAT_RESET_OFF     8
-#define STAT_IRQ_ACT       0	/* interrupt pending                 */
-#define STAT_IRQ_NOACT     16	/* no interrupt pending              */
-#define STAT_IO_NOBUSY     0	/* no transfer busy                  */
-#define STAT_IO_BUSY       32	/* transfer busy                     */
-
-/* I/O command register bits */
-
-#define IOCMD_GO           128	/* Bit 7 = 1 -> start register xfer  */
-
-/* LANCE registers */
-
-#define LANCE_CSR0         0	/* Status/Control                    */
-
-#define CSR0_ERR           0x8000	/* general error flag                */
-#define CSR0_BABL          0x4000	/* transmitter timeout               */
-#define CSR0_CERR          0x2000	/* collision error                   */
-#define CSR0_MISS          0x1000	/* lost Rx block                     */
-#define CSR0_MERR          0x0800	/* memory access error               */
-#define CSR0_RINT          0x0400	/* receiver interrupt                */
-#define CSR0_TINT          0x0200	/* transmitter interrupt             */
-#define CSR0_IDON          0x0100	/* initialization done               */
-#define CSR0_INTR          0x0080	/* general interrupt flag            */
-#define CSR0_INEA          0x0040	/* interrupt enable                  */
-#define CSR0_RXON          0x0020	/* receiver enabled                  */
-#define CSR0_TXON          0x0010	/* transmitter enabled               */
-#define CSR0_TDMD          0x0008	/* force transmission now            */
-#define CSR0_STOP          0x0004	/* stop LANCE                        */
-#define CSR0_STRT          0x0002	/* start LANCE                       */
-#define CSR0_INIT          0x0001	/* read initialization block         */
-
-#define LANCE_CSR1         1	/* addr bit 0..15 of initialization  */
-#define LANCE_CSR2         2	/*          16..23 block             */
-
-#define LANCE_CSR3         3	/* Bus control                       */
-#define CSR3_BCON_HOLD     0	/* Bit 0 = 0 -> BM1,BM0,HOLD         */
-#define CSR3_BCON_BUSRQ    1	/* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ    */
-#define CSR3_ALE_HIGH      0	/* Bit 1 = 0 -> ALE asserted high    */
-#define CSR3_ALE_LOW       2	/* Bit 1 = 1 -> ALE asserted low     */
-#define CSR3_BSWAP_OFF     0	/* Bit 2 = 0 -> no byte swap         */
-#define CSR3_BSWAP_ON      4	/* Bit 2 = 1 -> byte swap            */
-
-/* LANCE structures */
-
-typedef struct {		/* LANCE initialization block        */
-	u16 Mode;		/* mode flags                        */
-	u8 PAdr[6];		/* MAC address                       */
-	u8 LAdrF[8];		/* Multicast filter                  */
-	u32 RdrP;		/* Receive descriptor                */
-	u32 TdrP;		/* Transmit descriptor               */
-} LANCE_InitBlock;
-
-/* Mode flags init block */
-
-#define LANCE_INIT_PROM    0x8000	/* enable promiscuous mode           */
-#define LANCE_INIT_INTL    0x0040	/* internal loopback                 */
-#define LANCE_INIT_DRTY    0x0020	/* disable retry                     */
-#define LANCE_INIT_COLL    0x0010	/* force collision                   */
-#define LANCE_INIT_DTCR    0x0008	/* disable transmit CRC              */
-#define LANCE_INIT_LOOP    0x0004	/* loopback                          */
-#define LANCE_INIT_DTX     0x0002	/* disable transmitter               */
-#define LANCE_INIT_DRX     0x0001	/* disable receiver                  */
-
-typedef struct {		/* LANCE Tx descriptor               */
-	u16 LowAddr;		/* bit 0..15 of address              */
-	u16 Flags;		/* bit 16..23 of address + Flags     */
-	u16 Len;		/* 2s complement of packet length    */
-	u16 Status;		/* Result of transmission            */
-} LANCE_TxDescr;
-
-#define TXDSCR_FLAGS_OWN   0x8000	/* LANCE owns descriptor             */
-#define TXDSCR_FLAGS_ERR   0x4000	/* summary error flag                */
-#define TXDSCR_FLAGS_MORE  0x1000	/* more than one retry needed?       */
-#define TXDSCR_FLAGS_ONE   0x0800	/* one retry?                        */
-#define TXDSCR_FLAGS_DEF   0x0400	/* transmission deferred?            */
-#define TXDSCR_FLAGS_STP   0x0200	/* first packet in chain?            */
-#define TXDSCR_FLAGS_ENP   0x0100	/* last packet in chain?             */
-
-#define TXDSCR_STATUS_BUFF 0x8000	/* buffer error?                     */
-#define TXDSCR_STATUS_UFLO 0x4000	/* silo underflow during transmit?   */
-#define TXDSCR_STATUS_LCOL 0x1000	/* late collision?                   */
-#define TXDSCR_STATUS_LCAR 0x0800	/* loss of carrier?                  */
-#define TXDSCR_STATUS_RTRY 0x0400	/* retry error?                      */
-
-typedef struct {		/* LANCE Rx descriptor               */
-	u16 LowAddr;		/* bit 0..15 of address              */
-	u16 Flags;		/* bit 16..23 of address + Flags     */
-	u16 MaxLen;		/* 2s complement of buffer length    */
-	u16 Len;		/* packet length                     */
-} LANCE_RxDescr;
-
-#define RXDSCR_FLAGS_OWN   0x8000	/* LANCE owns descriptor             */
-#define RXDSCR_FLAGS_ERR   0x4000	/* summary error flag                */
-#define RXDSCR_FLAGS_FRAM  0x2000	/* framing error flag                */
-#define RXDSCR_FLAGS_OFLO  0x1000	/* FIFO overflow?                    */
-#define RXDSCR_FLAGS_CRC   0x0800	/* CRC error?                        */
-#define RXDSCR_FLAGS_BUFF  0x0400	/* buffer error?                     */
-#define RXDSCR_FLAGS_STP   0x0200	/* first packet in chain?            */
-#define RXDCSR_FLAGS_ENP   0x0100	/* last packet in chain?             */
-
-/* RAM layout */
-
-#define TXCOUNT            4	/* length of TX descriptor queue     */
-#define LTXCOUNT           2	/* log2 of it                        */
-#define RXCOUNT            4	/* length of RX descriptor queue     */
-#define LRXCOUNT           2	/* log2 of it                        */
-
-#define RAM_INITBASE       0	/* LANCE init block                  */
-#define RAM_TXBASE         24	/* Start of TX descriptor queue      */
-#define RAM_RXBASE         \
-(RAM_TXBASE + (TXCOUNT * 8))	/* Start of RX descriptor queue      */
-#define RAM_DATABASE       \
-(RAM_RXBASE + (RXCOUNT * 8))	/* Start of data area for frames     */
-#define RAM_BUFSIZE        1580	/* max. frame size - should never be
-				   reached                           */
-
-#endif				/* _SK_MCA_DRIVER_ */
-
-#endif	/* _SK_MCA_INCLUDE_ */

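For reference, the LANCE descriptors removed above store their Len/MaxLen fields as the two's complement of the byte count. A minimal userspace sketch of that convention; the helper names are hypothetical (the removed driver did this inline):

#include <stdio.h>
#include <stdint.h>

/* LANCE Len/MaxLen fields hold the two's complement of the length. */
static uint16_t lance_len_encode(uint16_t len)
{
	return (uint16_t)-len;
}

static uint16_t lance_len_decode(uint16_t field)
{
	return (uint16_t)-field;
}

int main(void)
{
	uint16_t f = lance_len_encode(1580);	/* RAM_BUFSIZE above */
	printf("%#06x -> %u\n", f, lance_len_decode(f));	/* 0xf9d4 -> 1580 */
	return 0;
}
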
+ 0 - 83
drivers/net/skfp/can.c

@@ -1,83 +0,0 @@
-/******************************************************************************
- *
- *	(C)Copyright 1998,1999 SysKonnect,
- *	a business unit of Schneider & Koch & Co. Datensysteme GmbH.
- *
- *	See the file "skfddi.c" for further information.
- *
- *	This program is free software; you can redistribute it and/or modify
- *	it under the terms of the GNU General Public License as published by
- *	the Free Software Foundation; either version 2 of the License, or
- *	(at your option) any later version.
- *
- *	The information in this file is provided "AS IS" without warranty.
- *
- ******************************************************************************/
-
-#ifndef	lint
-static const char xID_sccs[] = "@(#)can.c	1.5 97/04/07 (C) SK " ;
-#endif
-
-/*
- * canonical bit order
- */
-const u_char canonical[256] = {
-	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,
-	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,
-	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,
-	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8,
-	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,
-	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4,
-	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,
-	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc,
-	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,
-	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2,
-	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,
-	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa,
-	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,
-	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6,
-	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,
-	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe,
-	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,
-	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1,
-	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,
-	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9,
-	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,
-	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5,
-	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,
-	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd,
-	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,
-	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3,
-	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,
-	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb,
-	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,
-	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7,
-	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,
-	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
-} ;
-
-#ifdef	MAKE_TABLE
-int byte_reverse(x)
-int x ;
-{
-	int     y = 0 ;
-
-	if (x & 0x01)
-		y |= 0x80 ;
-	if (x & 0x02)
-		y |= 0x40 ;
-	if (x & 0x04)
-		y |= 0x20 ;
-	if (x & 0x08)
-		y |= 0x10 ;
-	if (x & 0x10)
-		y |= 0x08 ;
-	if (x & 0x20)
-		y |= 0x04 ;
-	if (x & 0x40)
-		y |= 0x02 ;
-	if (x & 0x80)
-		y |= 0x01 ;
-	return(y) ;
-}
-#endif

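The canonical[256] table and its MAKE_TABLE generator deleted above are superseded by the generic bitrev8() helper from <linux/bitrev.h>, which the following skfp hunks switch to. A standalone sketch of the equivalence; bitrev8_sketch is a hypothetical stand-in for the kernel helper (the kernel version is table-driven):

#include <stdio.h>
#include <stdint.h>

/* Reverse the bit order of one byte, as bitrev8() does. */
static uint8_t bitrev8_sketch(uint8_t x)
{
	x = (x & 0xf0) >> 4 | (x & 0x0f) << 4;
	x = (x & 0xcc) >> 2 | (x & 0x33) << 2;
	x = (x & 0xaa) >> 1 | (x & 0x55) << 1;
	return x;
}

int main(void)
{
	/* Spot-check against entries of the removed canonical[] table */
	printf("%#04x %#04x %#04x\n",
	       bitrev8_sketch(0x00),	/* 0x00 */
	       bitrev8_sketch(0x01),	/* 0x80 */
	       bitrev8_sketch(0x1f));	/* 0xf8 */
	return 0;
}
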
+ 10 - 14
drivers/net/skfp/drvfbi.c

@@ -23,6 +23,7 @@
 #include "h/smc.h"
 #include "h/smc.h"
 #include "h/supern_2.h"
 #include "h/supern_2.h"
 #include "h/skfbiinc.h"
 #include "h/skfbiinc.h"
+#include <linux/bitrev.h>
 
 
 #ifndef	lint
 #ifndef	lint
 static const char ID_sccs[] = "@(#)drvfbi.c	1.63 99/02/11 (C) SK " ;
 static const char ID_sccs[] = "@(#)drvfbi.c	1.63 99/02/11 (C) SK " ;
@@ -445,16 +446,14 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	char PmdType ;
 	int	i ;
 
-	extern const u_char canonical[256] ;
-
 #if	(defined(ISA) || defined(MCA))
 	for (i = 0; i < 4 ;i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[(inpw(PR_A(i+SA_MAC))&0xff)] ;
+			bitrev8(inpw(PR_A(i+SA_MAC)));
 	}
 	for (i = 4; i < 6; i++) {
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[(inpw(PR_A(i+SA_MAC+PRA_OFF))&0xff)] ;
+			bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF)));
 	}
 #endif
 #ifdef	EISA
@@ -464,17 +463,17 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	 */
 	for (i = 0; i < 4 ;i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(PR_A(i+SA_MAC))] ;
+			bitrev8(inp(PR_A(i+SA_MAC)));
 	}
 	for (i = 4; i < 6; i++) {
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(PR_A(i+SA_MAC+PRA_OFF))] ;
+			bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF)));
 	}
 #endif
 #ifdef	PCI
 	for (i = 0; i < 6; i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(ADDR(B2_MAC_0+i))] ;
+			bitrev8(inp(ADDR(B2_MAC_0+i)));
 	}
 #endif
 #ifndef	PCI
@@ -493,7 +492,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	if (mac_addr) {
 		for (i = 0; i < 6 ;i++) {
 			smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
-			smc->hw.fddi_home_addr.a[i] = canonical[mac_addr[i]] ;
+			smc->hw.fddi_home_addr.a[i] = bitrev8(mac_addr[i]);
 		}
 		return ;
 	}
@@ -501,7 +500,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 
 	for (i = 0; i < 6 ;i++) {
 		smc->hw.fddi_canon_addr.a[i] =
-			canonical[smc->hw.fddi_phys_addr.a[i]] ;
+			bitrev8(smc->hw.fddi_phys_addr.a[i]);
 	}
 }
 
@@ -1269,11 +1268,8 @@ void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
 {
 	int i ;
 
-	extern const u_char canonical[256] ;
-
-	for (i = 0 ; i < 6 ; i++) {
-		bia_addr->a[i] = canonical[smc->hw.fddi_phys_addr.a[i]] ;
-	}
+	for (i = 0 ; i < 6 ; i++)
+		bia_addr->a[i] = bitrev8(smc->hw.fddi_phys_addr.a[i]);
 }
 
 void smt_start_watchdog(struct s_smc *smc)

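The drvfbi.c hunks above keep the driver's behaviour unchanged: the driver maintains a bit-reversed ("canonical") copy of each MAC address byte (fddi_canon_addr alongside fddi_phys_addr), and only the table lookup has been swapped for bitrev8(). A sketch of the whole-address conversion; rev8 and the sample address are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Bit-reverse one byte; stand-in for the kernel's bitrev8(). */
static uint8_t rev8(uint8_t x)
{
	uint8_t y = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (x & (1 << i))
			y |= 0x80 >> i;
	return y;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x00, 0x5a, 0x12, 0x34, 0x56 };	/* arbitrary */
	int i;

	for (i = 0; i < 6; i++)
		printf("%02x%c", rev8(mac[i]), i < 5 ? ':' : '\n');
	return 0;
}
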
+ 2 - 2
drivers/net/skfp/fplustm.c

@@ -22,7 +22,7 @@
 #include "h/fddi.h"
 #include "h/fddi.h"
 #include "h/smc.h"
 #include "h/smc.h"
 #include "h/supern_2.h"
 #include "h/supern_2.h"
-#include "can.c"
+#include <linux/bitrev.h>
 
 
 #ifndef	lint
 #ifndef	lint
 static const char ID_sccs[] = "@(#)fplustm.c	1.32 99/02/23 (C) SK " ;
 static const char ID_sccs[] = "@(#)fplustm.c	1.32 99/02/23 (C) SK " ;
@@ -1073,7 +1073,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
 	if (can) {
 		p = own->a ;
 		for (i = 0 ; i < 6 ; i++, p++)
-			*p = canonical[*p] ;
+			*p = bitrev8(*p);
 	}
 	slot = NULL;
 	for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){

+ 4 - 6
drivers/net/skfp/smt.c

@@ -18,6 +18,7 @@
 #include "h/fddi.h"
 #include "h/fddi.h"
 #include "h/smc.h"
 #include "h/smc.h"
 #include "h/smt_p.h"
 #include "h/smt_p.h"
+#include <linux/bitrev.h>
 
 
 #define KERNEL
 #define KERNEL
 #include "h/smtstate.h"
 #include "h/smtstate.h"
@@ -26,8 +27,6 @@
 static const char ID_sccs[] = "@(#)smt.c	2.43 98/11/23 (C) SK " ;
 #endif
 
-extern const u_char canonical[256] ;
-
 /*
  * FC in SMbuf
  */
@@ -180,7 +179,7 @@ void smt_agent_init(struct s_smc *smc)
 	driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
 	for (i = 0 ; i < 6 ; i ++) {
 		smc->mib.fddiSMTStationId.sid_node.a[i] =
-			canonical[smc->mib.fddiSMTStationId.sid_node.a[i]] ;
+			bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
 	}
 	smc->mib.fddiSMTManufacturerData[0] =
 		smc->mib.fddiSMTStationId.sid_node.a[0] ;
@@ -2049,9 +2048,8 @@ static void hwm_conv_can(struct s_smc *smc, char *data, int len)
 
 	SK_UNUSED(smc) ;
 
-	for (i = len; i ; i--, data++) {
-		*data = canonical[*(u_char *)data] ;
-	}
+	for (i = len; i ; i--, data++)
+		*data = bitrev8(*data);
 }
 #endif
 

+ 159 - 76
drivers/net/skge.c

@@ -42,7 +42,7 @@
 #include "skge.h"
 #include "skge.h"
 
 
 #define DRV_NAME		"skge"
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.9"
+#define DRV_VERSION		"1.10"
 #define PFX			DRV_NAME " "
 #define PFX			DRV_NAME " "
 
 
 #define DEFAULT_TX_RING_SIZE	128
 #define DEFAULT_TX_RING_SIZE	128
@@ -132,18 +132,93 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
-static int wol_supported(const struct skge_hw *hw)
+static u32 wol_supported(const struct skge_hw *hw)
 {
-	return !((hw->chip_id == CHIP_ID_GENESIS ||
-		  (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
+	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
+		return WAKE_MAGIC | WAKE_PHY;
+	else
+		return 0;
+}
+
+static u32 pci_wake_enabled(struct pci_dev *dev)
+{
+	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+	u16 value;
+
+	/* If the device doesn't support PM capabilities, it cannot
+	 * raise PME, so report wake-up as unavailable */
+	if (!pm)
+		return 0;
+
+	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
+
+	value &= PCI_PM_CAP_PME_MASK;
+	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */
+
+	return value != 0;
+}
+
+static void skge_wol_init(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	enum pause_control save_mode;
+	u32 ctrl;
+
+	/* Bring hardware out of reset */
+	skge_write16(hw, B0_CTST, CS_RST_CLR);
+	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100; skge_reset will re-enable on resume */
+	save_mode = skge->flow_control;
+	skge->flow_control = FLOW_MODE_SYMMETRIC;
+
+	ctrl = skge->advertising;
+	skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+
+	skge_phy_reset(skge);
+
+	skge->flow_control = save_mode;
+	skge->advertising = ctrl;
+
+	/* Set GMAC to no flow control and auto update for speed/duplex */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+	/* Set WOL address */
+	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+		    skge->netdev->dev_addr, ETH_ALEN);
+
+	/* Turn on appropriate WOL control bits */
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+	ctrl = 0;
+	if (skge->wol & WAKE_PHY)
+		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+	if (skge->wol & WAKE_MAGIC)
+		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+	/* block receiver */
+	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 }
 
 static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
-	wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
+	wol->supported = wol_supported(skge->hw);
+	wol->wolopts = skge->wol;
 }
 
 static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -151,23 +226,12 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
 
-	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
+	if (wol->wolopts & ~wol_supported(hw))
 		return -EOPNOTSUPP;
 
-	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
-		return -EOPNOTSUPP;
-
-	skge->wol = wol->wolopts == WAKE_MAGIC;
-
-	if (skge->wol) {
-		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
-
-		skge_write16(hw, WOL_CTRL_STAT,
-			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
-			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
-	} else
-		skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
-
+	skge->wol = wol->wolopts;
+	if (!netif_running(dev))
+		skge_wol_init(skge);
 	return 0;
 }
 
@@ -2373,6 +2437,9 @@ static int skge_up(struct net_device *dev)
 	size_t rx_size, tx_size;
 	int err;
 
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
 	if (netif_msg_ifup(skge))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
@@ -2392,7 +2459,7 @@ static int skge_up(struct net_device *dev)
 	BUG_ON(skge->dma & 7);
 
 	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
-		printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
+		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
 		err = -EINVAL;
 		goto free_pci_mem;
 	}
@@ -3001,6 +3068,7 @@ static void skge_mac_intr(struct skge_hw *hw, int port)
 /* Handle device specific framing and timeout interrupts */
 static void skge_error_irq(struct skge_hw *hw)
 {
+	struct pci_dev *pdev = hw->pdev;
 	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
 
 	if (hw->chip_id == CHIP_ID_GENESIS) {
@@ -3016,12 +3084,12 @@ static void skge_error_irq(struct skge_hw *hw)
 	}
 
 	if (hwstatus & IS_RAM_RD_PAR) {
-		printk(KERN_ERR PFX "Ram read data parity error\n");
+		dev_err(&pdev->dev, "Ram read data parity error\n");
 		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
 	}
 
 	if (hwstatus & IS_RAM_WR_PAR) {
-		printk(KERN_ERR PFX "Ram write data parity error\n");
+		dev_err(&pdev->dev, "Ram write data parity error\n");
 		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
 	}
 
@@ -3032,38 +3100,38 @@ static void skge_error_irq(struct skge_hw *hw)
 		skge_mac_parity(hw, 1);
 
 	if (hwstatus & IS_R1_PAR_ERR) {
-		printk(KERN_ERR PFX "%s: receive queue parity error\n",
-		       hw->dev[0]->name);
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
+			hw->dev[0]->name);
 		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
 	}
 
 	if (hwstatus & IS_R2_PAR_ERR) {
-		printk(KERN_ERR PFX "%s: receive queue parity error\n",
-		       hw->dev[1]->name);
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
+			hw->dev[1]->name);
 		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
 	}
 
 	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
 		u16 pci_status, pci_cmd;
 
-		pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
-		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		pci_read_config_word(pdev, PCI_STATUS, &pci_status);
 
-		printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
-			       pci_name(hw->pdev), pci_cmd, pci_status);
+		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+			pci_cmd, pci_status);
 
 		/* Write the error bits back to clear them. */
 		pci_status &= PCI_STATUS_ERROR_BITS;
 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-		pci_write_config_word(hw->pdev, PCI_COMMAND,
+		pci_write_config_word(pdev, PCI_COMMAND,
 				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
-		pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
+		pci_write_config_word(pdev, PCI_STATUS, pci_status);
 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 		/* if error still set then just ignore it */
 		hwstatus = skge_read32(hw, B0_HWE_ISRC);
 		if (hwstatus & IS_IRQ_STAT) {
-			printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
+			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
 			hw->intr_mask &= ~IS_HW_ERR;
 		}
 	}
@@ -3277,8 +3345,8 @@ static int skge_reset(struct skge_hw *hw)
 			hw->phy_addr = PHY_ADDR_BCOM;
 			break;
 		default:
-			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
-			       pci_name(hw->pdev), hw->phy_type);
+			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
+			       hw->phy_type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -3293,8 +3361,8 @@ static int skge_reset(struct skge_hw *hw)
 		break;
 
 	default:
-		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
-		       pci_name(hw->pdev), hw->chip_id);
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+		       hw->chip_id);
 		return -EOPNOTSUPP;
 	}
 
@@ -3334,7 +3402,7 @@ static int skge_reset(struct skge_hw *hw)
 		/* avoid boards with stuck Hardware error bits */
 		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
 		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
-			printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
+			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
 			hw->intr_mask &= ~IS_HW_ERR;
 		}
 
@@ -3408,7 +3476,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	struct net_device *dev = alloc_etherdev(sizeof(*skge));
 
 	if (!dev) {
-		printk(KERN_ERR "skge etherdev alloc failed");
+		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
 		return NULL;
 	}
 
@@ -3452,6 +3520,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->duplex = -1;
 	skge->speed = -1;
 	skge->advertising = skge_supported_modes(hw);
+	skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
 
 	hw->dev[port] = dev;
 
@@ -3496,15 +3565,13 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
 		goto err_out;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
 		goto err_out_disable_pdev;
 	}
 
@@ -3519,8 +3586,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 	if (err) {
-		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "no usable DMA configuration\n");
 		goto err_out_free_regions;
 	}
 
@@ -3538,8 +3604,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	err = -ENOMEM;
 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 	if (!hw) {
-		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
 		goto err_out_free_regions;
 	}
 
@@ -3550,8 +3615,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
-		printk(KERN_ERR PFX "%s: cannot map device registers\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot map device registers\n");
 		goto err_out_free_hw;
 	}
 
@@ -3567,23 +3631,19 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	if (!dev)
 		goto err_out_led_off;
 
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
-		       pci_name(pdev));
-		err = -EIO;
-		goto err_out_free_netdev;
-	}
+	/* Some motherboards are broken and have zero in ROM. */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
 
 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot register net device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot register net device\n");
 		goto err_out_free_netdev;
 	}
 
 	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
 		       dev->name, pdev->irq);
 		goto err_out_unregister;
 	}
@@ -3594,7 +3654,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 			skge_show_addr(dev1);
 		else {
 			/* Failure to register second port need not be fatal */
-			printk(KERN_WARNING PFX "register of second port failed\n");
+			dev_warn(&pdev->dev, "register of second port failed\n");
 			hw->dev[1] = NULL;
 			free_netdev(dev1);
 		}
@@ -3659,28 +3719,46 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM
+static int vaux_avail(struct pci_dev *pdev)
+{
+	int pm_cap;
+
+	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm_cap) {
+		u16 ctl;
+		pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
+		if (ctl & PCI_PM_CAP_AUX_POWER)
+			return 1;
+	}
+	return 0;
+}
+
+
 static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct skge_hw *hw  = pci_get_drvdata(pdev);
-	int i, wol = 0;
+	int i, err, wol = 0;
+
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
 
-	pci_save_state(pdev);
 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
+		struct skge_port *skge = netdev_priv(dev);
 
-		if (netif_running(dev)) {
-			struct skge_port *skge = netdev_priv(dev);
+		if (netif_running(dev))
+			skge_down(dev);
+		if (skge->wol)
+			skge_wol_init(skge);
 
-			netif_carrier_off(dev);
-			if (skge->wol)
-				netif_stop_queue(dev);
-			else
-				skge_down(dev);
-			wol |= skge->wol;
-		}
-		netif_device_detach(dev);
+		wol |= skge->wol;
 	}
 
+	if (wol && vaux_avail(pdev))
+		skge_write8(hw, B0_POWER_CTRL,
+			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
 	skge_write32(hw, B0_IMSK, 0);
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3693,8 +3771,14 @@ static int skge_resume(struct pci_dev *pdev)
 	struct skge_hw *hw  = pci_get_drvdata(pdev);
 	int i, err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err)
+		goto out;
+
+	err = pci_restore_state(pdev);
+	if (err)
+		goto out;
+
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	err = skge_reset(hw);
@@ -3704,7 +3788,6 @@ static int skge_resume(struct pci_dev *pdev)
 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 
-		netif_device_attach(dev);
 		if (netif_running(dev)) {
 			err = skge_up(dev);
 

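The pci_wake_enabled() helper added above pulls the PME_Support field out of the PCI PM capability with a mask-and-shift via ffs(). A userspace sketch of that extraction; the 0xf800 mask matches the kernel's PCI_PM_CAP_PME_MASK, but treat the register values here as made-up examples:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define PCI_PM_CAP_PME_MASK	0xf800	/* PME_Support bits of the PMC register */

/* Mask the field out, then shift it down to bit 0 using the mask's
 * first set bit -- the same idiom pci_wake_enabled() uses. */
static int pme_supported(uint16_t pmc)
{
	uint16_t v = pmc & PCI_PM_CAP_PME_MASK;

	v >>= ffs(PCI_PM_CAP_PME_MASK) - 1;
	return v != 0;
}

int main(void)
{
	printf("%d\n", pme_supported(0x0003));	/* 0: no PME states supported */
	printf("%d\n", pme_supported(0xc803));	/* 1: PME from D3hot/D3cold */
	return 0;
}
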
+ 2 - 0
drivers/net/skge.h

@@ -876,11 +876,13 @@ enum {
 	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
 	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
 };
+#define WOL_REGS(port, x)	(x + (port)*0x80)
 
 enum {
 	WOL_PATT_RAM_1	= 0x1000,/*  WOL Pattern RAM Link 1 */
 	WOL_PATT_RAM_2	= 0x1400,/*  WOL Pattern RAM Link 2 */
 };
+#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)
 
 enum {
 	BASE_XMAC_1	= 0x2000,/* XMAC 1 registers */

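The two macros added to skge.h bank the WOL registers per port: a 0x80 stride for the control block and 0x400 for the pattern RAM. A quick expansion check; WOL_CTRL_STAT's 0x0f20 offset is assumed from the surrounding register map, while the strides come from the patch itself:

#include <stdio.h>

#define WOL_CTRL_STAT		0x0f20	/* assumed offset */
#define WOL_PATT_RAM_1		0x1000
#define WOL_REGS(port, x)	(x + (port)*0x80)
#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)

int main(void)
{
	/* Port 1's WOL control block sits 0x80 above port 0's */
	printf("%#x %#x\n", WOL_REGS(0, WOL_CTRL_STAT), WOL_REGS(1, WOL_CTRL_STAT));
	/* Pattern RAM is banked in 0x400 steps: 0x1000, 0x1400 */
	printf("%#x %#x\n", WOL_PATT_RAM_BASE(0), WOL_PATT_RAM_BASE(1));
	return 0;
}
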
+ 336 - 207
drivers/net/sky2.c

@@ -49,7 +49,7 @@
 #include "sky2.h"
 #include "sky2.h"
 
 
 #define DRV_NAME		"sky2"
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.10"
+#define DRV_VERSION		"1.12"
 #define PFX			DRV_NAME " "
 #define PFX			DRV_NAME " "
 
 
 /*
 /*
@@ -105,6 +105,7 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
@@ -126,6 +127,9 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
 	{ 0 }
 };
 
@@ -140,7 +144,7 @@ static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
 static const char *yukon2_name[] = {
 	"XL",		/* 0xb3 */
 	"EC Ultra", 	/* 0xb4 */
-	"UNKNOWN",	/* 0xb5 */
+	"Extreme",	/* 0xb5 */
 	"EC",		/* 0xb6 */
 	"FE",		/* 0xb7 */
 };
@@ -192,76 +196,52 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 	return v;
 }
 
-static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
-{
-	u16 power_control;
-	int vaux;
-
-	pr_debug("sky2_set_power_state %d\n", state);
-	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-
-	power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
-	vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
-		(power_control & PCI_PM_CAP_PME_D3cold);
-
-	power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
-
-	power_control |= PCI_PM_CTRL_PME_STATUS;
-	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
 
-	switch (state) {
-	case PCI_D0:
-		/* switch power to VCC (WA for VAUX problem) */
-		sky2_write8(hw, B0_POWER_CTRL,
-			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
-
-		/* disable Core Clock Division, */
-		sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
-
-		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
-			/* enable bits are inverted */
-			sky2_write8(hw, B2_Y2_CLK_GATE,
-				    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
-				    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
-				    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
-		else
-			sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+static void sky2_power_on(struct sky2_hw *hw)
+{
+	/* switch power to VCC (WA for VAUX problem) */
+	sky2_write8(hw, B0_POWER_CTRL,
+		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 
-		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
-			u32 reg1;
+	/* disable Core Clock Division, */
+	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 
-			sky2_pci_write32(hw, PCI_DEV_REG3, 0);
-			reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
-			reg1 &= P_ASPM_CONTROL_MSK;
-			sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
-			sky2_pci_write32(hw, PCI_DEV_REG5, 0);
-		}
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+		/* enable bits are inverted */
+		sky2_write8(hw, B2_Y2_CLK_GATE,
+			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+	else
+		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 
-		break;
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+		u32 reg1;
 
-	case PCI_D3hot:
-	case PCI_D3cold:
-		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
-			sky2_write8(hw, B2_Y2_CLK_GATE, 0);
-		else
-			/* enable bits are inverted */
-			sky2_write8(hw, B2_Y2_CLK_GATE,
-				    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
-				    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
-				    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
-
-		/* switch power to VAUX */
-		if (vaux && state != PCI_D3cold)
-			sky2_write8(hw, B0_POWER_CTRL,
-				    (PC_VAUX_ENA | PC_VCC_ENA |
-				     PC_VAUX_ON | PC_VCC_OFF));
-		break;
-	default:
-		printk(KERN_ERR PFX "Unknown power state %d\n", state);
+		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+		reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
+		reg1 &= P_ASPM_CONTROL_MSK;
+		sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
+		sky2_pci_write32(hw, PCI_DEV_REG5, 0);
 	}
+}
 
-	sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
-	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+static void sky2_power_aux(struct sky2_hw *hw)
+{
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+	else
+		/* enable bits are inverted */
+		sky2_write8(hw, B2_Y2_CLK_GATE,
+			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+
+	/* switch power to VAUX */
+	if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
+		sky2_write8(hw, B0_POWER_CTRL,
+			    (PC_VAUX_ENA | PC_VCC_ENA |
+			     PC_VAUX_ON | PC_VCC_OFF));
 }
 
 static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
@@ -313,8 +293,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
 
-	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	if (sky2->autoneg == AUTONEG_ENABLE
+	    && !(hw->chip_id == CHIP_ID_YUKON_XL
+		 || hw->chip_id == CHIP_ID_YUKON_EC_U
+		 || hw->chip_id == CHIP_ID_YUKON_EX)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -341,8 +323,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 			/* enable automatic crossover */
 			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 
-			if (sky2->autoneg == AUTONEG_ENABLE &&
-			    (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+			if (sky2->autoneg == AUTONEG_ENABLE
+			    && (hw->chip_id == CHIP_ID_YUKON_XL
+				|| hw->chip_id == CHIP_ID_YUKON_EC_U
+				|| hw->chip_id == CHIP_ID_YUKON_EX)) {
 				ctrl &= ~PHY_M_PC_DSC_MSK;
 				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
 			}
@@ -497,7 +481,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 		/* restore page register */
 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 		break;
+
 	case CHIP_ID_YUKON_EC_U:
+	case CHIP_ID_YUKON_EX:
 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 
 		/* select page 3 to access LED control register */
@@ -539,7 +525,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
 		/* set page register to 0 */
 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
-	} else {
+	} else if (hw->chip_id != CHIP_ID_YUKON_EX) {
 		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
 
 		if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
@@ -591,6 +577,73 @@ static void sky2_phy_reinit(struct sky2_port *sky2)
 	spin_unlock_bh(&sky2->phy_lock);
 }
 
+/* Put device in state to listen for Wake On Lan */
+static void sky2_wol_init(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	enum flow_control save_mode;
+	u16 ctrl;
+	u32 reg1;
+
+	/* Bring hardware out of reset */
+	sky2_write16(hw, B0_CTST, CS_RST_CLR);
+	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100
+	 * sky2_reset will re-enable on resume
+	 */
+	save_mode = sky2->flow_mode;
+	ctrl = sky2->advertising;
+
+	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+	sky2->flow_mode = FC_NONE;
+	sky2_phy_power(hw, port, 1);
+	sky2_phy_reinit(sky2);
+
+	sky2->flow_mode = save_mode;
+	sky2->advertising = ctrl;
+
+	/* Set GMAC to no flow control and auto update for speed/duplex */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+	/* Set WOL address */
+	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+		    sky2->netdev->dev_addr, ETH_ALEN);
+
+	/* Turn on appropriate WOL control bits */
+	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+	ctrl = 0;
+	if (sky2->wol & WAKE_PHY)
+		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+	if (sky2->wol & WAKE_MAGIC)
+		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+	/* Turn on legacy PCI-Express PME mode */
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+	reg1 |= PCI_Y2_PME_LEGACY;
+	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+	/* block receiver */
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+
+}
+
 static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 {
 	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -684,7 +737,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
 	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
 		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
@@ -1467,6 +1520,9 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 			if (unlikely(netif_msg_tx_done(sky2)))
 				printk(KERN_DEBUG "%s: tx done %u\n",
 				       dev->name, idx);
+			sky2->net_stats.tx_packets++;
+			sky2->net_stats.tx_bytes += re->skb->len;
+
 			dev_kfree_skb_any(re->skb);
 		}
 
@@ -1641,7 +1697,9 @@ static void sky2_link_up(struct sky2_port *sky2)
 	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
 		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
 
-	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
+	if (hw->chip_id == CHIP_ID_YUKON_XL
+	    || hw->chip_id == CHIP_ID_YUKON_EC_U
+	    || hw->chip_id == CHIP_ID_YUKON_EX) {
 		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 		u16 led = PHY_M_LEDC_LOS_CTRL(1);	/* link active */
 
@@ -1734,14 +1792,16 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
 	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
 
 	/* Pause bits are offset (9..8) */
-	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
+	if (hw->chip_id == CHIP_ID_YUKON_XL
+	    || hw->chip_id == CHIP_ID_YUKON_EC_U
+	    || hw->chip_id == CHIP_ID_YUKON_EX)
 		aux >>= 6;
 
 	sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
 				      aux & PHY_M_PS_TX_P_EN);
 
 	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
-	    && hw->chip_id != CHIP_ID_YUKON_EC_U)
+	    && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
 		sky2->flow_status = FC_NONE;
 
 	if (aux & PHY_M_PS_RX_P_EN)
@@ -1794,48 +1854,37 @@ out:
 }
 
 
-/* Transmit timeout is only called if we are running, carries is up
+/* Transmit timeout is only called if we are running, carrier is up
  * and tx queue is full (stopped).
+ * Called with netif_tx_lock held.
  */
 static void sky2_tx_timeout(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
-	unsigned txq = txqaddr[sky2->port];
-	u16 report, done;
+	u32 imask;
 
 	if (netif_msg_timer(sky2))
 		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
 
-	report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
-	done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
-
 	printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
 	printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
-	       dev->name,
-	       sky2->tx_cons, sky2->tx_prod, report, done);
+	       dev->name, sky2->tx_cons, sky2->tx_prod,
+	       sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+	       sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
 
-	if (report != done) {
-		printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
-
-		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
-		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
-	} else if (report != sky2->tx_cons) {
-		printk(KERN_INFO PFX "status report lost?\n");
+	imask = sky2_read32(hw, B0_IMSK);	/* block IRQ in hw */
+	sky2_write32(hw, B0_IMSK, 0);
+	sky2_read32(hw, B0_IMSK);
 
-		netif_tx_lock_bh(dev);
-		sky2_tx_complete(sky2, report);
-		netif_tx_unlock_bh(dev);
-	} else {
-		printk(KERN_INFO PFX "hardware hung? flushing\n");
+	netif_poll_disable(hw->dev[0]);		/* stop NAPI poll */
+	synchronize_irq(hw->pdev->irq);
 
-		sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
-		sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+	netif_start_queue(dev);			/* don't wakeup during flush */
+	sky2_tx_complete(sky2, sky2->tx_prod);	/* Flush transmit queue */
 
-		sky2_tx_clean(dev);
+	sky2_write32(hw, B0_IMSK, imask);
 
-		sky2_qset(hw, txq);
-		sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
-	}
+	sky2_phy_reinit(sky2);			/* this clears flow control etc */
 }
 
 static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1849,8 +1898,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
 		return -EINVAL;
 
+	/* TSO on Yukon Ultra and MTU > 1500 not supported */
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
-		return -EINVAL;
+		dev->features &= ~NETIF_F_TSO;
 
 	if (!netif_running(dev)) {
 		dev->mtu = new_mtu;
@@ -2089,6 +2139,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 				goto force_update;
 
 			skb->protocol = eth_type_trans(skb, dev);
+			sky2->net_stats.rx_packets++;
+			sky2->net_stats.rx_bytes += skb->len;
 			dev->last_rx = jiffies;
 
 #ifdef SKY2_VLAN_TAG_USED
@@ -2218,8 +2270,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
 		pci_err = sky2_pci_read16(hw, PCI_STATUS);
 		if (net_ratelimit())
-			printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
-			       pci_name(hw->pdev), pci_err);
+			dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n",
+			        pci_err);
 
 		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		sky2_pci_write16(hw, PCI_STATUS,
@@ -2234,8 +2286,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 		pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
 
 		if (net_ratelimit())
-			printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
-			       pci_name(hw->pdev), pex_err);
+			dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n",
+				pex_err);
 
 		/* clear the interrupt */
 		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2404,6 +2456,7 @@ static inline u32 sky2_mhz(const struct sky2_hw *hw)
 	switch (hw->chip_id) {
 	case CHIP_ID_YUKON_EC:
 	case CHIP_ID_YUKON_EC_U:
+	case CHIP_ID_YUKON_EX:
 		return 125;	/* 125 Mhz */
 	case CHIP_ID_YUKON_FE:
 		return 100;	/* 100 Mhz */
@@ -2423,34 +2476,62 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
 }
 
 
-static int sky2_reset(struct sky2_hw *hw)
+static int __devinit sky2_init(struct sky2_hw *hw)
 {
-	u16 status;
 	u8 t8;
-	int i;
 
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
 
 	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
 	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
-		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
-		       pci_name(hw->pdev), hw->chip_id);
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+			hw->chip_id);
 		return -EOPNOTSUPP;
 	}
 
+	if (hw->chip_id == CHIP_ID_YUKON_EX)
+		dev_warn(&hw->pdev->dev, "this driver not yet tested on this chip type\n"
+			 "Please report success or failure to <netdev@vger.kernel.org>\n");
+
+	/* Make sure and enable all clocks */
+	if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
+		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
 	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
 
 	/* This rev is really old, and requires untested workarounds */
 	if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
-		printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
-		       pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
-		       hw->chip_id, hw->chip_rev);
+		dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
+			yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
+			hw->chip_id, hw->chip_rev);
 		return -EOPNOTSUPP;
 	}
 
+	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+	hw->ports = 1;
+	t8 = sky2_read8(hw, B2_Y2_HW_RES);
+	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+			++hw->ports;
+	}
+
+	return 0;
+}
+
+static void sky2_reset(struct sky2_hw *hw)
+{
+	u16 status;
+	int i;
+
 	/* disable ASF */
 	if (hw->chip_id <= CHIP_ID_YUKON_EC) {
-		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+		if (hw->chip_id == CHIP_ID_YUKON_EX) {
+			status = sky2_read16(hw, HCU_CCSR);
+			status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+				    HCU_CCSR_UC_STATE_MSK);
+			sky2_write16(hw, HCU_CCSR, status);
+		} else
+			sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 		sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
 	}
 
@@ -2472,15 +2553,7 @@ static int sky2_reset(struct sky2_hw *hw)
 		sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
 
 
-	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
-	hw->ports = 1;
-	t8 = sky2_read8(hw, B2_Y2_HW_RES);
-	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
-		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
-			++hw->ports;
-	}
-
-	sky2_set_power_state(hw, PCI_D0);
+	sky2_power_on(hw);
 
 	for (i = 0; i < hw->ports; i++) {
 		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -2563,7 +2636,37 @@ static int sky2_reset(struct sky2_hw *hw)
 	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
 	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
 	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
+}
+
+static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+
+	wol->supported = sky2_wol_supported(sky2->hw);
+	wol->wolopts = sky2->wol;
+}
+
+static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
+		return -EOPNOTSUPP;
+
+	sky2->wol = wol->wolopts;
+
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U)
+		sky2_write32(hw, B0_CTST, sky2->wol
+			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
 
 
+	if (!netif_running(dev))
+		sky2_wol_init(sky2);
 	return 0;
 }
 
@@ -2814,25 +2917,9 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
 	}
 }
 
-/* Use hardware MIB variables for critical path statistics and
- * transmit feedback not reported at interrupt.
- * Other errors are accounted for in interrupt handler.
- */
 static struct net_device_stats *sky2_get_stats(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	u64 data[13];
-
-	sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
-
-	sky2->net_stats.tx_bytes = data[0];
-	sky2->net_stats.rx_bytes = data[1];
-	sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
-	sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
-	sky2->net_stats.multicast = data[3] + data[5];
-	sky2->net_stats.collisions = data[10];
-	sky2->net_stats.tx_aborted_errors = data[12];
-
 	return &sky2->net_stats;
 }
 
@@ -3191,7 +3278,9 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 static const struct ethtool_ops sky2_ethtool_ops = {
 	.get_settings = sky2_get_settings,
 	.set_settings = sky2_set_settings,
-	.get_drvinfo = sky2_get_drvinfo,
+	.get_drvinfo  = sky2_get_drvinfo,
+	.get_wol      = sky2_get_wol,
+	.set_wol      = sky2_set_wol,
 	.get_msglevel = sky2_get_msglevel,
 	.set_msglevel = sky2_set_msglevel,
 	.nway_reset   = sky2_nway_reset,
@@ -3221,13 +3310,14 @@ static const struct ethtool_ops sky2_ethtool_ops = {

 /* Initialize network device */
 static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
-						     unsigned port, int highmem)
+						     unsigned port,
+						     int highmem, int wol)
 {
 	struct sky2_port *sky2;
 	struct net_device *dev = alloc_etherdev(sizeof(*sky2));

 	if (!dev) {
-		printk(KERN_ERR "sky2 etherdev alloc failed");
+		dev_err(&hw->pdev->dev, "etherdev alloc failed");
 		return NULL;
 	}
 
@@ -3269,6 +3359,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
 	sky2->rx_csum = 1;
+	sky2->wol = wol;

 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
@@ -3278,11 +3369,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,

 	sky2->port = port;
 
-	if (hw->chip_id != CHIP_ID_YUKON_EC_U)
-		dev->features |= NETIF_F_TSO;
+	dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
-	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

 #ifdef SKY2_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3343,8 +3432,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)

 	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
+		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
 		return err;
 	}
 
@@ -3355,9 +3443,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)

 	if (!hw->msi) {
 		/* MSI test failed, go back to INTx mode */
-		printk(KERN_INFO PFX "%s: No interrupt generated using MSI, "
-		       "switching to INTx mode.\n",
-		       pci_name(pdev));
+		dev_info(&pdev->dev, "No interrupt generated using MSI, "
+			 "switching to INTx mode.\n");

 		err = -EOPNOTSUPP;
 		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
@@ -3371,62 +3458,62 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
 	return err;
 }
 
+static int __devinit pci_wake_enabled(struct pci_dev *dev)
+{
+	int pm  = pci_find_capability(dev, PCI_CAP_ID_PM);
+	u16 value;
+
+	if (!pm)
+		return 0;
+	if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
+		return 0;
+	return value & PCI_PM_CTRL_PME_ENABLE;
+}
+
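pci_wake_enabled() above only reports whether firmware left PME generation enabled in the PM control/status register; which power states the device can assert PME# from is advertised separately in the PM capabilities word. A companion check would look roughly like this (constants are standard <linux/pci_regs.h> definitions; this sketch is editorial, not part of the patch):

	static int pme_capable_d3cold(struct pci_dev *dev)
	{
		int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
		u16 pmc;

		if (!pm || pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc))
			return 0;
		/* bits 15:11 of PMC list the states PME# can be signalled from */
		return !!(pmc & PCI_PM_CAP_PME_D3cold);
	}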
 static int __devinit sky2_probe(struct pci_dev *pdev,
 				const struct pci_device_id *ent)
 {
-	struct net_device *dev, *dev1 = NULL;
+	struct net_device *dev;
 	struct sky2_hw *hw;
-	int err, pm_cap, using_dac = 0;
+	int err, using_dac = 0, wol_default;

 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
 		goto err_out;
 	}

 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
 		goto err_out;
 	}

 	pci_set_master(pdev);
 
-	/* Find power-management capability. */
-	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
-		       "aborting.\n");
-		err = -EIO;
-		goto err_out_free_regions;
-	}
-
 	if (sizeof(dma_addr_t) > sizeof(u32) &&
 	    !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
 		using_dac = 1;
 		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
 		if (err < 0) {
-			printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
-			       "for consistent allocations\n", pci_name(pdev));
+			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
+				"for consistent allocations\n");
 			goto err_out_free_regions;
 		}
-
 	} else {
 		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (err) {
-			printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-			       pci_name(pdev));
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
 			goto err_out_free_regions;
 		}
 	}
 
+	wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;
+
 	err = -ENOMEM;
 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 	if (!hw) {
-		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
 		goto err_out_free_regions;
 	}
 
@@ -3434,11 +3521,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,

 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
-		printk(KERN_ERR PFX "%s: cannot map device registers\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot map device registers\n");
 		goto err_out_free_hw;
 	}
-	hw->pm_cap = pm_cap;

 #ifdef __BIG_ENDIAN
 	/* The sk98lin vendor driver uses hardware byte swapping but
@@ -3458,18 +3543,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	if (!hw->st_le)
 		goto err_out_iounmap;
 
-	err = sky2_reset(hw);
+	err = sky2_init(hw);
 	if (err)
 		goto err_out_iounmap;
 
-	printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
+	dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
 	       DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
 	       pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
 	       hw->chip_id, hw->chip_rev);
 
-	dev = sky2_init_netdev(hw, 0, using_dac);
-	if (!dev)
+	sky2_reset(hw);
+
+	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
+	if (!dev) {
+		err = -ENOMEM;
 		goto err_out_free_pci;
+	}

 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
@@ -3481,32 +3570,33 @@ static int __devinit sky2_probe(struct pci_dev *pdev,

 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot register net device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot register net device\n");
 		goto err_out_free_netdev;
 	}

 	err = request_irq(pdev->irq,  sky2_intr, hw->msi ? 0 : IRQF_SHARED,
 			  dev->name, hw);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
+		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
 		goto err_out_unregister;
 	}
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);

 	sky2_show_addr(dev);
 
-	if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
-		if (register_netdev(dev1) == 0)
-			sky2_show_addr(dev1);
-		else {
-			/* Failure to register second port need not be fatal */
-			printk(KERN_WARNING PFX
-			       "register of second port failed\n");
+	if (hw->ports > 1) {
+		struct net_device *dev1;
+
+		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
+		if (!dev1)
+			dev_warn(&pdev->dev, "allocation for second device failed\n");
+		else if ((err = register_netdev(dev1))) {
+			dev_warn(&pdev->dev,
+				 "register of second port failed (%d)\n", err);
 			hw->dev[1] = NULL;
 			free_netdev(dev1);
-		}
+		} else
+			sky2_show_addr(dev1);
 	}

 	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
@@ -3555,7 +3645,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
 		unregister_netdev(dev1);
 	unregister_netdev(dev0);
 
-	sky2_set_power_state(hw, PCI_D3hot);
+	sky2_power_aux(hw);
+
 	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
 	sky2_write8(hw, B0_CTST, CS_RST_SET);
 	sky2_read8(hw, B0_CTST);
@@ -3580,27 +3671,31 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
 static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
-	int i;
-	pci_power_t pstate = pci_choose_state(pdev, state);
-
-	if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
-		return -EINVAL;
+	int i, wol = 0;

 	del_timer_sync(&hw->idle_timer);
 	netif_poll_disable(hw->dev[0]);

 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
 
-		if (netif_running(dev)) {
+		if (netif_running(dev))
 			sky2_down(dev);
-			netif_device_detach(dev);
-		}
+
+		if (sky2->wol)
+			sky2_wol_init(sky2);
+
+		wol |= sky2->wol;
 	}

 	sky2_write32(hw, B0_IMSK, 0);
+	sky2_power_aux(hw);
+
 	pci_save_state(pdev);
-	sky2_set_power_state(hw, pstate);
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
 	return 0;
 }
 
@@ -3609,21 +3704,22 @@ static int sky2_resume(struct pci_dev *pdev)
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
 	int i, err;
 
-	pci_restore_state(pdev);
-	pci_enable_wake(pdev, PCI_D0, 0);
-	sky2_set_power_state(hw, PCI_D0);
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err)
+		goto out;
 
-	err = sky2_reset(hw);
+	err = pci_restore_state(pdev);
 	if (err)
 		goto out;
 
+	pci_enable_wake(pdev, PCI_D0, 0);
+	sky2_reset(hw);
+
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);

 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 		if (netif_running(dev)) {
-			netif_device_attach(dev);
-
 			err = sky2_up(dev);
 			if (err) {
 				printk(KERN_ERR PFX "%s: could not up: %d\n",
@@ -3636,11 +3732,43 @@ static int sky2_resume(struct pci_dev *pdev)

 	netif_poll_enable(hw->dev[0]);
 	sky2_idle_start(hw);
+	return 0;
 out:
+	dev_err(&pdev->dev, "resume failed (%d)\n", err);
+	pci_disable_device(pdev);
 	return err;
 }
 #endif
 
+static void sky2_shutdown(struct pci_dev *pdev)
+{
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int i, wol = 0;
+
+	del_timer_sync(&hw->idle_timer);
+	netif_poll_disable(hw->dev[0]);
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (sky2->wol) {
+			wol = 1;
+			sky2_wol_init(sky2);
+		}
+	}
+
+	if (wol)
+		sky2_power_aux(hw);
+
+	pci_enable_wake(pdev, PCI_D3hot, wol);
+	pci_enable_wake(pdev, PCI_D3cold, wol);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+}
+
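The new .shutdown hook (wired up just below) is what makes Wake-on-LAN survive a plain poweroff or reboot, which never passes through the suspend path. Its generic shape, with the sky2 specifics stripped out (sketch only):

	static void example_net_shutdown(struct pci_dev *pdev)
	{
		/* arm the device's own wake logic first, then tell the PCI
		 * core that PME# may fire from either D3 state */
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}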
 static struct pci_driver sky2_driver = {
 	.name = DRV_NAME,
 	.id_table = sky2_id_table,
@@ -3650,6 +3778,7 @@ static struct pci_driver sky2_driver = {
 	.suspend = sky2_suspend,
 	.resume = sky2_resume,
 #endif
+	.shutdown = sky2_shutdown,
 };

 static int __init sky2_init_module(void)

+ 60 - 25
drivers/net/sky2.h

@@ -32,6 +32,7 @@ enum pci_dev_reg_1 {
 	PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
 	PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
 	PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
+	PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
 };

 enum pci_dev_reg_2 {
@@ -370,12 +371,9 @@ enum {

 /*	B2_CHIP_ID		 8 bit 	Chip Identification Number */
 enum {
-	CHIP_ID_GENESIS	   = 0x0a, /* Chip ID for GENESIS */
-	CHIP_ID_YUKON	   = 0xb0, /* Chip ID for YUKON */
-	CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
-	CHIP_ID_YUKON_LP   = 0xb2, /* Chip ID for YUKON-LP */
 	CHIP_ID_YUKON_XL   = 0xb3, /* Chip ID for YUKON-2 XL */
 	CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
+	CHIP_ID_YUKON_EX   = 0xb5, /* Chip ID for YUKON-2 Extreme */
 	CHIP_ID_YUKON_EC   = 0xb6, /* Chip ID for YUKON-2 EC */
  	CHIP_ID_YUKON_FE   = 0xb7, /* Chip ID for YUKON-2 FE */
 
@@ -767,6 +765,24 @@ enum {
 	POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit	Poll. List Start Addr (high) */
 };
 
+enum {
+	SMB_CFG		 = 0x0e40, /* 32 bit	SMBus Config Register */
+	SMB_CSR		 = 0x0e44, /* 32 bit	SMBus Control/Status Register */
+};
+
+enum {
+	CPU_WDOG	 = 0x0e48, /* 32 bit	Watchdog Register  */
+	CPU_CNTR	 = 0x0e4C, /* 32 bit	Counter Register  */
+	CPU_TIM		 = 0x0e50,/* 32 bit	Timer Compare Register  */
+	CPU_AHB_ADDR	 = 0x0e54, /* 32 bit	CPU AHB Debug  Register  */
+	CPU_AHB_WDATA	 = 0x0e58, /* 32 bit	CPU AHB Debug  Register  */
+	CPU_AHB_RDATA	 = 0x0e5C, /* 32 bit	CPU AHB Debug  Register  */
+	HCU_MAP_BASE	 = 0x0e60, /* 32 bit	Reset Mapping Base */
+	CPU_AHB_CTRL	 = 0x0e64, /* 32 bit	CPU AHB Debug  Register  */
+	HCU_CCSR	 = 0x0e68, /* 32 bit	CPU Control and Status Register */
+	HCU_HCSR	 = 0x0e6C, /* 32 bit	Host Control and Status Register */
+};
+
 /* ASF Subsystem Registers (Yukon-2 only) */
 enum {
 	B28_Y2_SMB_CONFIG  = 0x0e40,/* 32 bit	ASF SMBus Config Register */
@@ -837,33 +853,27 @@ enum {
 	GMAC_LINK_CTRL	= 0x0f10,/* 16 bit	Link Control Reg */

 /* Wake-up Frame Pattern Match Control Registers (YUKON only) */
-
-	WOL_REG_OFFS	= 0x20,/* HW-Bug: Address is + 0x20 against spec. */
-
 	WOL_CTRL_STAT	= 0x0f20,/* 16 bit	WOL Control/Status Reg */
 	WOL_MATCH_CTL	= 0x0f22,/*  8 bit	WOL Match Control Reg */
 	WOL_MATCH_RES	= 0x0f23,/*  8 bit	WOL Match Result Reg */
 	WOL_MAC_ADDR	= 0x0f24,/* 32 bit	WOL MAC Address */
-	WOL_PATT_PME	= 0x0f2a,/*  8 bit	WOL PME Match Enable (Yukon-2) */
-	WOL_PATT_ASFM	= 0x0f2b,/*  8 bit	WOL ASF Match Enable (Yukon-2) */
 	WOL_PATT_RPTR	= 0x0f2c,/*  8 bit	WOL Pattern Read Pointer */

 /* WOL Pattern Length Registers (YUKON only) */
-
 	WOL_PATT_LEN_LO	= 0x0f30,/* 32 bit	WOL Pattern Length 3..0 */
 	WOL_PATT_LEN_HI	= 0x0f34,/* 24 bit	WOL Pattern Length 6..4 */

 /* WOL Pattern Counter Registers (YUKON only) */
-
-
 	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
 	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
 };
+#define WOL_REGS(port, x)	(x + (port)*0x80)

 enum {
 	WOL_PATT_RAM_1	= 0x1000,/*  WOL Pattern RAM Link 1 */
 	WOL_PATT_RAM_2	= 0x1400,/*  WOL Pattern RAM Link 2 */
 };
+#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)
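These two helpers bank the per-port WOL registers: port 1's block sits 0x80 above port 0's, and its pattern RAM 0x400 above. A likely call shape in the WoL setup path, sketched on the assumption that the WOL_CTL_ENA_* counterparts of the disable bits defined further down are used (the caller itself is outside this hunk):

	/* sketch: enable magic-packet wake on one port, rewind pattern RAM */
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT),
		     WOL_CTL_ENA_PME_ON_MAGIC_PKT | WOL_CTL_ENA_MAGIC_PKT_UNIT);
	sky2_write8(hw, WOL_REGS(port, WOL_PATT_RPTR), 0);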

 enum {
 	BASE_GMAC_1	= 0x2800,/* GMAC 1 registers */
@@ -1654,6 +1664,39 @@ enum {
 	Y2_ASF_CLR_ASFI = 1<<1,	/* Clear host IRQ */
 	Y2_ASF_HOST_IRQ = 1<<0,	/* Issue an IRQ to HOST system */
 };
+/*	HCU_CCSR	CPU Control and Status Register */
+enum {
+	HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
+	HCU_CCSR_CPU_SLEEP	= 1<<26, /* CPU sleep status */
+	/* Clock Stretching Timeout */
+	HCU_CCSR_CS_TO		= 1<<25,
+	HCU_CCSR_WDOG		= 1<<24, /* Watchdog Reset */
+
+	HCU_CCSR_CLR_IRQ_HOST	= 1<<17, /* Clear IRQ_HOST */
+	HCU_CCSR_SET_IRQ_HCU	= 1<<16, /* Set IRQ_HCU */
+
+	HCU_CCSR_AHB_RST	= 1<<9, /* Reset AHB bridge */
+	HCU_CCSR_CPU_RST_MODE	= 1<<8, /* CPU Reset Mode */
+
+	HCU_CCSR_SET_SYNC_CPU	= 1<<5,
+	HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
+	HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
+	HCU_CCSR_OS_PRSNT	= 1<<2, /* ASF OS Present */
+/* Microcontroller State */
+	HCU_CCSR_UC_STATE_MSK	= 3,
+	HCU_CCSR_UC_STATE_BASE	= 1<<0,
+	HCU_CCSR_ASF_RESET	= 0,
+	HCU_CCSR_ASF_HALTED	= 1<<1,
+	HCU_CCSR_ASF_RUNNING	= 1<<0,
+};
+
+/*	HCU_HCSR	Host Control and Status Register */
+enum {
+	HCU_HCSR_SET_IRQ_CPU	= 1<<16, /* Set IRQ_CPU */
+
+	HCU_HCSR_CLR_IRQ_HCU	= 1<<1, /* Clear IRQ_HCU */
+	HCU_HCSR_SET_IRQ_HOST	= 1<<0,	/* Set IRQ_HOST */
+};

 /*	STAT_CTRL		32 bit	Status BMU control register (Yukon-2 only) */
 enum {
@@ -1715,14 +1758,17 @@ enum {
 	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */

 #define GMAC_DEF_MSK     GM_IS_TX_FF_UR
+};

 /*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
-						/* Bits 15.. 2:	reserved */
+enum {						/* Bits 15.. 2:	reserved */
 	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
 	GMLC_RST_SET	= 1<<0,	/* Set   GMAC Link Reset */
+};


 /*	WOL_CTRL_STAT	16 bit	WOL Control/Status Reg */
+enum {
 	WOL_CTL_LINK_CHG_OCC		= 1<<15,
 	WOL_CTL_MAGIC_PKT_OCC		= 1<<14,
 	WOL_CTL_PATTERN_OCC		= 1<<13,
@@ -1741,17 +1787,6 @@ enum {
 	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
 };
 
-#define WOL_CTL_DEFAULT				\
-	(WOL_CTL_DIS_PME_ON_LINK_CHG |	\
-	WOL_CTL_DIS_PME_ON_PATTERN |	\
-	WOL_CTL_DIS_PME_ON_MAGIC_PKT |	\
-	WOL_CTL_DIS_LINK_CHG_UNIT |		\
-	WOL_CTL_DIS_PATTERN_UNIT |		\
-	WOL_CTL_DIS_MAGIC_PKT_UNIT)
-
-/*	WOL_MATCH_CTL	 8 bit	WOL Match Control Reg */
-#define WOL_CTL_PATT_ENA(x)	(1 << (x))
-

 /* Control flags */
 enum {
@@ -1875,6 +1910,7 @@ struct sky2_port {
 	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
 	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
 	u8		     rx_csum;
+	u8		     wol;
  	enum flow_control    flow_mode;
  	enum flow_control    flow_status;
 
@@ -1887,7 +1923,6 @@ struct sky2_hw {
 	struct pci_dev	     *pdev;
 	struct net_device    *dev[2];
 
-	int		     pm_cap;
 	u8	     	     chip_id;
 	u8		     chip_rev;
 	u8		     pmd_type;

+ 150 - 163
drivers/net/spider_net.c

@@ -280,72 +280,67 @@ spider_net_free_chain(struct spider_net_card *card,
 {
 	struct spider_net_descr *descr;
 
-	for (descr = chain->tail; !descr->bus_addr; descr = descr->next) {
-		pci_unmap_single(card->pdev, descr->bus_addr,
-				 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
+	descr = chain->ring;
+	do {
 		descr->bus_addr = 0;
-	}
+		descr->next_descr_addr = 0;
+		descr = descr->next;
+	} while (descr != chain->ring);
+
+	dma_free_coherent(&card->pdev->dev, chain->num_desc,
+	    chain->ring, chain->dma_addr);
 }

 /**
- * spider_net_init_chain - links descriptor chain
+ * spider_net_init_chain - alloc and link descriptor chain
  * @card: card structure
  * @chain: address of chain
- * @start_descr: address of descriptor array
- * @no: number of descriptors
  *
- * we manage a circular list that mirrors the hardware structure,
+ * We manage a circular list that mirrors the hardware structure,
  * except that the hardware uses bus addresses.
  *
- * returns 0 on success, <0 on failure
+ * Returns 0 on success, <0 on failure
  */
 static int
 spider_net_init_chain(struct spider_net_card *card,
-		       struct spider_net_descr_chain *chain,
-		       struct spider_net_descr *start_descr,
-		       int no)
+		       struct spider_net_descr_chain *chain)
 {
 	int i;
 	struct spider_net_descr *descr;
 	dma_addr_t buf;
+	size_t alloc_size;
 
-	descr = start_descr;
-	memset(descr, 0, sizeof(*descr) * no);
+	alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
 
-	/* set up the hardware pointers in each descriptor */
-	for (i=0; i<no; i++, descr++) {
-		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+	chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
+		&chain->dma_addr, GFP_KERNEL);
+
+	if (!chain->ring)
+		return -ENOMEM;
 
-		buf = pci_map_single(card->pdev, descr,
-				     SPIDER_NET_DESCR_SIZE,
-				     PCI_DMA_BIDIRECTIONAL);
+	descr = chain->ring;
+	memset(descr, 0, alloc_size);
 
-		if (pci_dma_mapping_error(buf))
-			goto iommu_error;
+	/* Set up the hardware pointers in each descriptor */
+	buf = chain->dma_addr;
+	for (i=0; i < chain->num_desc; i++, descr++) {
+		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;

 		descr->bus_addr = buf;
+		descr->next_descr_addr = 0;
 		descr->next = descr + 1;
 		descr->prev = descr - 1;
 
+		buf += sizeof(struct spider_net_descr);
 	}
 	/* do actual circular list */
-	(descr-1)->next = start_descr;
-	start_descr->prev = descr-1;
+	(descr-1)->next = chain->ring;
+	chain->ring->prev = descr-1;

 	spin_lock_init(&chain->lock);
-	chain->head = start_descr;
-	chain->tail = start_descr;
-
+	chain->head = chain->ring;
+	chain->tail = chain->ring;
 	return 0;
-
-iommu_error:
-	descr = start_descr;
-	for (i=0; i < no; i++, descr++)
-		if (descr->bus_addr)
-			pci_unmap_single(card->pdev, descr->bus_addr,
-					 SPIDER_NET_DESCR_SIZE,
-					 PCI_DMA_BIDIRECTIONAL);
-	return -ENOMEM;
 }

 /**
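The rework above trades per-descriptor pci_map_single() streaming mappings for one coherent allocation holding the whole ring. The idiom, sketched below; the byte count must match between allocation and release, and note the free in the first hunk passes chain->num_desc where the allocation used num_desc * sizeof(struct spider_net_descr) bytes, which deserves a second look:

	/* sketch: paired coherent alloc/free for a descriptor ring */
	size_t bytes = chain->num_desc * sizeof(struct spider_net_descr);

	chain->ring = dma_alloc_coherent(&card->pdev->dev, bytes,
					 &chain->dma_addr, GFP_KERNEL);
	/* ... ring in use ... */
	dma_free_coherent(&card->pdev->dev, bytes,
			  chain->ring, chain->dma_addr);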
@@ -372,21 +367,20 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
 }

 /**
- * spider_net_prepare_rx_descr - reinitializes a rx descriptor
+ * spider_net_prepare_rx_descr - Reinitialize RX descriptor
  * @card: card structure
  * @descr: descriptor to re-init
  *
- * return 0 on succes, <0 on failure
+ * Return 0 on success, <0 on failure.
  *
- * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
- * Activate the descriptor state-wise
+ * Allocates a new rx skb, iommu-maps it and attaches it to the
+ * descriptor. Mark the descriptor as activated, ready-to-use.
  */
 static int
 spider_net_prepare_rx_descr(struct spider_net_card *card,
 			    struct spider_net_descr *descr)
 {
 	dma_addr_t buf;
-	int error = 0;
 	int offset;
 	int bufsize;
 
@@ -414,7 +408,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 		(SPIDER_NET_RXBUF_ALIGN - 1);
 	if (offset)
 		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
-	/* io-mmu-map the skb */
+	/* iommu-map the skb */
 	buf = pci_map_single(card->pdev, descr->skb->data,
 			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
 	descr->buf_addr = buf;
@@ -425,11 +419,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 		card->spider_stats.rx_iommu_map_error++;
 		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
 	} else {
+		descr->next_descr_addr = 0;
+		wmb();
 		descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
 		descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
 					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
 					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
+
+		wmb();
+		descr->prev->next_descr_addr = descr->bus_addr;
 	}
 
-	return error;
+	return 0;
 }

 /**
@@ -493,10 +492,10 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
 }

 /**
- * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
+ * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
  * @card: card structure
  *
- * returns 0 on success, <0 on failure
+ * Returns 0 on success, <0 on failure.
  */
 static int
 spider_net_alloc_rx_skbs(struct spider_net_card *card)
@@ -507,16 +506,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
 	result = -ENOMEM;

 	chain = &card->rx_chain;
-	/* put at least one buffer into the chain. if this fails,
-	 * we've got a problem. if not, spider_net_refill_rx_chain
-	 * will do the rest at the end of this function */
+	/* Put at least one buffer into the chain. if this fails,
+	 * we've got a problem. If not, spider_net_refill_rx_chain
+	 * will do the rest at the end of this function. */
 	if (spider_net_prepare_rx_descr(card, chain->head))
 		goto error;
 	else
 		chain->head = chain->head->next;
 
-	/* this will allocate the rest of the rx buffers; if not, it's
-	 * business as usual later on */
+	/* This will allocate the rest of the rx buffers;
+	 * if not, it's business as usual later on. */
 	spider_net_refill_rx_chain(card);
 	spider_net_enable_rxdmac(card);
 	return 0;
@@ -707,7 +706,7 @@ spider_net_set_low_watermark(struct spider_net_card *card)
 	}

 	/* If TX queue is short, don't even bother with interrupts */
-	if (cnt < card->num_tx_desc/4)
+	if (cnt < card->tx_chain.num_desc/4)
 		return cnt;

 	/* Set low-watermark 3/4th's of the way into the queue. */
@@ -915,16 +914,13 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
  * @descr: descriptor to process
  * @card: card structure
- * @napi: whether caller is in NAPI context
- *
- * returns 1 on success, 0 if no packet was passed to the stack
  *
- * iommu-unmaps the skb, fills out skb structure and passes the data to the
- * stack. The descriptor state is not changed.
+ * Fills out skb structure and passes the data to the stack.
+ * The descriptor state is not changed.
  */
-static int
+static void
 spider_net_pass_skb_up(struct spider_net_descr *descr,
-		       struct spider_net_card *card, int napi)
+		       struct spider_net_card *card)
 {
 	struct sk_buff *skb;
 	struct net_device *netdev;
@@ -932,23 +928,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,

 	data_status = descr->data_status;
 	data_error = descr->data_error;
-
 	netdev = card->netdev;
 
-	/* unmap descriptor */
-	pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
-			PCI_DMA_FROMDEVICE);
-
-	/* the cases we'll throw away the packet immediately */
-	if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
-		if (netif_msg_rx_err(card))
-			pr_err("error in received descriptor found, "
-			       "data_status=x%08x, data_error=x%08x\n",
-			       data_status, data_error);
-		card->spider_stats.rx_desc_error++;
-		return 0;
-	}
-
 	skb = descr->skb;
 	skb->dev = netdev;
 	skb_put(skb, descr->valid_size);
@@ -977,57 +958,72 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 	}

 	/* pass skb up to stack */
-	if (napi)
-		netif_receive_skb(skb);
-	else
-		netif_rx_ni(skb);
+	netif_receive_skb(skb);

 	/* update netdevice statistics */
 	card->netdev_stats.rx_packets++;
 	card->netdev_stats.rx_bytes += skb->len;
+}
 
-	return 1;
+#ifdef DEBUG
+static void show_rx_chain(struct spider_net_card *card)
+{
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	struct spider_net_descr *start= chain->tail;
+	struct spider_net_descr *descr= start;
+	int status;
+
+	int cnt = 0;
+	int cstat = spider_net_get_descr_status(descr);
+	printk(KERN_INFO "RX chain tail at descr=%ld\n",
+	     (start - card->descr) - card->tx_chain.num_desc);
+	status = cstat;
+	do
+	{
+		status = spider_net_get_descr_status(descr);
+		if (cstat != status) {
+			printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
+			cstat = status;
+			cnt = 0;
+		}
+		cnt ++;
+		descr = descr->next;
+	} while (descr != start);
+	printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
 }
 }
 

 /**
  * spider_net_decode_one_descr - processes an rx descriptor
  * @card: card structure
  *
  *
+ * Returns 1 if a packet has been sent to the stack, otherwise 0
  *
  *
+ * Processes an rx descriptor by iommu-unmapping the data buffer and passing
  * the packet up to the stack. This function is called in softirq
  * the packet up to the stack. This function is called in softirq
  * context, e.g. either bottom half from interrupt or NAPI polling context
  */
 static int
+spider_net_decode_one_descr(struct spider_net_card *card)
 {
 {
 	struct spider_net_descr_chain *chain = &card->rx_chain;
 	struct spider_net_descr *descr = chain->tail;
 	int status;
 

 	status = spider_net_get_descr_status(descr);
-	if (status == SPIDER_NET_DESCR_CARDOWNED) {
-		/* nothing in the descriptor yet */
-		result=0;
-		goto out;
-	}
-
-	if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
-		/* not initialized yet, the ring must be empty */
-		spider_net_refill_rx_chain(card);
-		spider_net_enable_rxdmac(card);
-		result=0;
-		goto out;
-	}
+	/* Nothing in the descriptor, or ring must be empty */
+	if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
+	    (status == SPIDER_NET_DESCR_NOT_IN_USE))
+		return 0;

 	/* descriptor definitively used -- move on tail */
 	chain->tail = descr->next;
 
-	result = 0;
+	/* unmap descriptor */
+	pci_unmap_single(card->pdev, descr->buf_addr,
+			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
+
 	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
 	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
 	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
@@ -1035,31 +1031,55 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
 			pr_err("%s: dropping RX descriptor with state %d\n",
 			pr_err("%s: dropping RX descriptor with state %d\n",
 			       card->netdev->name, status);
 			       card->netdev->name, status);
 		card->netdev_stats.rx_dropped++;
 		card->netdev_stats.rx_dropped++;
-		pci_unmap_single(card->pdev, descr->buf_addr,
-				SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
-		dev_kfree_skb_irq(descr->skb);
-		goto refill;
+		goto bad_desc;
 	}

 	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
 	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
-		if (netif_msg_rx_err(card)) {
-			pr_err("%s: RX descriptor with state %d\n",
+		if (netif_msg_rx_err(card))
+			pr_err("%s: RX descriptor with unkown state %d\n",
 			       card->netdev->name, status);
-			card->spider_stats.rx_desc_unk_state++;
-		}
-		goto refill;
+		card->spider_stats.rx_desc_unk_state++;
+		goto bad_desc;
 	}
 
-	/* ok, we've got a packet in descr */
-	result = spider_net_pass_skb_up(descr, card, napi);
-refill:
+	/* The cases we'll throw away the packet immediately */
+	if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+		if (netif_msg_rx_err(card))
+			pr_err("%s: error in received descriptor found, "
+			       "data_status=x%08x, data_error=x%08x\n",
+			       card->netdev->name,
+			       descr->data_status, descr->data_error);
+		goto bad_desc;
+	}
+
+	if (descr->dmac_cmd_status & 0xfefe) {
+		pr_err("%s: bad status, cmd_status=x%08x\n",
+			       card->netdev->name,
+			       descr->dmac_cmd_status);
+		pr_err("buf_addr=x%08x\n", descr->buf_addr);
+		pr_err("buf_size=x%08x\n", descr->buf_size);
+		pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
+		pr_err("result_size=x%08x\n", descr->result_size);
+		pr_err("valid_size=x%08x\n", descr->valid_size);
+		pr_err("data_status=x%08x\n", descr->data_status);
+		pr_err("data_error=x%08x\n", descr->data_error);
+		pr_err("bus_addr=x%08x\n", descr->bus_addr);
+		pr_err("which=%ld\n", descr - card->rx_chain.ring);
+
+		card->spider_stats.rx_desc_error++;
+		goto bad_desc;
+	}
+
+	/* Ok, we've got a packet in descr */
+	spider_net_pass_skb_up(descr, card);
 	descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
-	/* change the descriptor state: */
-	if (!napi)
-		spider_net_refill_rx_chain(card);
-out:
-	return result;
+	return 1;
+
+bad_desc:
+	dev_kfree_skb_irq(descr->skb);
+	descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+	return 0;
 }

 /**
@@ -1085,7 +1105,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 	packets_to_do = min(*budget, netdev->quota);

 	while (packets_to_do) {
-		if (spider_net_decode_one_descr(card, 1)) {
+		if (spider_net_decode_one_descr(card)) {
 			packets_done++;
 			packets_to_do--;
 		} else {
@@ -1098,6 +1118,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 	netdev->quota -= packets_done;
 	*budget -= packets_done;
 	spider_net_refill_rx_chain(card);
+	spider_net_enable_rxdmac(card);

 	/* if all packets are in the stack, enable interrupts and return 0 */
 	/* if not, return 1 */
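spider_net_poll() follows the pre-2.6.24 NAPI contract, where ->poll hangs off the net_device and the return value says whether work remains. The skeleton of that interface, as a hedged reference (process_one_rx() and rx_ring_empty() are hypothetical placeholders):

	static int example_poll(struct net_device *netdev, int *budget)
	{
		int done = 0;
		int todo = min(*budget, netdev->quota);

		while (todo-- && process_one_rx(netdev))	/* hypothetical */
			done++;

		netdev->quota -= done;
		*budget -= done;

		if (rx_ring_empty(netdev)) {			/* hypothetical */
			netif_rx_complete(netdev);	/* re-arm RX interrupt */
			return 0;	/* polling finished */
		}
		return 1;	/* more packets pending, poll again */
	}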
@@ -1226,24 +1247,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
-/**
- * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
- * @card: card structure
- *
- * spider_net_handle_rxram_full empties the RX ring so that spider can put
- * more packets in it and empty its RX RAM. This is called in bottom half
- * context
- */
-static void
-spider_net_handle_rxram_full(struct spider_net_card *card)
-{
-	while (spider_net_decode_one_descr(card, 0))
-		;
-	spider_net_enable_rxchtails(card);
-	spider_net_enable_rxdmac(card);
-	netif_rx_schedule(card->netdev);
-}
-
 /**
  * spider_net_handle_error_irq - handles errors raised by an interrupt
  * @card: card structure
@@ -1366,10 +1369,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
 	case SPIDER_NET_GRMFLLINT:
 		if (netif_msg_intr(card) && net_ratelimit())
-			pr_debug("Spider RX RAM full, incoming packets "
+			pr_err("Spider RX RAM full, incoming packets "
 			       "might be discarded!\n");
 			       "might be discarded!\n");
 		spider_net_rx_irq_off(card);
 		spider_net_rx_irq_off(card);
-		tasklet_schedule(&card->rxram_full_tl);
+		netif_rx_schedule(card->netdev);
 		show_error = 0;
 		break;
 
@@ -1384,7 +1387,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	case SPIDER_NET_GDCDCEINT: /* fallthrough */
 	case SPIDER_NET_GDBDCEINT: /* fallthrough */
 	case SPIDER_NET_GDADCEINT:
-		if (netif_msg_intr(card))
+		if (netif_msg_intr(card) && net_ratelimit())
 			pr_err("got descriptor chain end interrupt, "
 			pr_err("got descriptor chain end interrupt, "
 			       "restarting DMAC %c.\n",
 			       "restarting DMAC %c.\n",
 			       'D'-(i-SPIDER_NET_GDDDCEINT)/3);
 			       'D'-(i-SPIDER_NET_GDDDCEINT)/3);
@@ -1455,7 +1458,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 			break;
 	}
 
-	if ((show_error) && (netif_msg_intr(card)))
+	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
 		pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
 		pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
 		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
 		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
 		       card->netdev->name,
 		       card->netdev->name,
@@ -1651,27 +1654,18 @@ int
 spider_net_open(struct net_device *netdev)
 {
 	struct spider_net_card *card = netdev_priv(netdev);
-	struct spider_net_descr *descr;
-	int i, result;
+	int result;
 
-	result = -ENOMEM;
-	if (spider_net_init_chain(card, &card->tx_chain, card->descr,
-	                          card->num_tx_desc))
+	result = spider_net_init_chain(card, &card->tx_chain);
+	if (result)
 		goto alloc_tx_failed;
-
 	card->low_watermark = NULL;
 
-	/* rx_chain is after tx_chain, so offset is descr + tx_count */
-	if (spider_net_init_chain(card, &card->rx_chain,
-	                          card->descr + card->num_tx_desc,
-	                          card->num_rx_desc))
+	result = spider_net_init_chain(card, &card->rx_chain);
+	if (result)
 		goto alloc_rx_failed;
 
-	descr = card->rx_chain.head;
-	for (i=0; i < card->num_rx_desc; i++, descr++)
-		descr->next_descr_addr = descr->next->bus_addr;
-
-	/* allocate rx skbs */
+	/* Allocate rx skbs */
 	if (spider_net_alloc_rx_skbs(card))
 		goto alloc_skbs_failed;
 
@@ -1902,7 +1896,6 @@ spider_net_stop(struct net_device *netdev)
 {
 	struct spider_net_card *card = netdev_priv(netdev);
 
-	tasklet_kill(&card->rxram_full_tl);
 	netif_poll_disable(netdev);
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
@@ -1924,6 +1917,7 @@ spider_net_stop(struct net_device *netdev)

 	/* release chains */
 	spider_net_release_tx_chain(card, 1);
+	spider_net_free_rx_chain_contents(card);

 	spider_net_free_rx_chain_contents(card);
 
@@ -2046,9 +2040,6 @@ spider_net_setup_netdev(struct spider_net_card *card)

 	pci_set_drvdata(card->pdev, netdev);
 
-	card->rxram_full_tl.data = (unsigned long) card;
-	card->rxram_full_tl.func =
-		(void (*)(unsigned long)) spider_net_handle_rxram_full;
 	init_timer(&card->tx_timer);
 	card->tx_timer.function =
 		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
@@ -2057,8 +2048,8 @@ spider_net_setup_netdev(struct spider_net_card *card)

 	card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
 
-	card->num_tx_desc = tx_descriptors;
-	card->num_rx_desc = rx_descriptors;
+	card->tx_chain.num_desc = tx_descriptors;
+	card->rx_chain.num_desc = rx_descriptors;

 	spider_net_setup_netdev_ops(netdev);
 
@@ -2107,12 +2098,8 @@ spider_net_alloc_card(void)
 {
 	struct net_device *netdev;
 	struct spider_net_card *card;
-	size_t alloc_size;
 
-	alloc_size = sizeof (*card) +
-		sizeof (struct spider_net_descr) * rx_descriptors +
-		sizeof (struct spider_net_descr) * tx_descriptors;
-	netdev = alloc_etherdev(alloc_size);
+	netdev = alloc_etherdev(sizeof(struct spider_net_card));
 	if (!netdev)
 		return NULL;
 

+ 6 - 14
drivers/net/spider_net.h

@@ -24,7 +24,7 @@
 #ifndef _SPIDER_NET_H
 #define _SPIDER_NET_H
 
-#define VERSION "1.6 A"
+#define VERSION "1.6 B"

 #include "sungem_phy.h"
 
@@ -378,6 +378,9 @@ struct spider_net_descr_chain {
 	spinlock_t lock;
 	struct spider_net_descr *head;
 	struct spider_net_descr *tail;
+	struct spider_net_descr *ring;
+	int num_desc;
+	dma_addr_t dma_addr;
 };

 /* descriptor data_status bits */
@@ -397,8 +400,6 @@ struct spider_net_descr_chain {
  * 701b8000 would be correct, but every packets gets that flag */
 #define SPIDER_NET_DESTROY_RX_FLAGS	0x700b8000
 
-#define SPIDER_NET_DESCR_SIZE		32
-
 /* this will be bigger some time */
 struct spider_net_options {
 	int rx_csum; /* for rx: if 0 ip_summed=NONE,
@@ -441,25 +442,16 @@ struct spider_net_card {
 	struct spider_net_descr_chain rx_chain;
 	struct spider_net_descr *low_watermark;
 
-	struct net_device_stats netdev_stats;
-
-	struct spider_net_options options;
-
-	spinlock_t intmask_lock;
-	struct tasklet_struct rxram_full_tl;
 	struct timer_list tx_timer;
-
 	struct work_struct tx_timeout_task;
 	atomic_t tx_timeout_task_counter;
 	wait_queue_head_t waitq;

 	/* for ethtool */
 	int msg_enable;
-	int num_rx_desc;
-	int num_tx_desc;
+	struct net_device_stats netdev_stats;
 	struct spider_net_extra_stats spider_stats;
 	struct spider_net_extra_stats spider_stats;
-	struct spider_net_descr descr[0];
+	struct spider_net_options options;
 };
 };

 #define pr_err(fmt,arg...) \
+ 2 - 2
drivers/net/spider_net_ethtool.c

@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
 	struct spider_net_card *card = netdev->priv;
 	struct spider_net_card *card = netdev->priv;

 	ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
+	ering->tx_pending = card->tx_chain.num_desc;
 	ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
 	ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
+	ering->rx_pending = card->rx_chain.num_desc;
 }
 }

 static int spider_net_get_stats_count(struct net_device *netdev)
+ 0 - 32
drivers/net/tg3.c

@@ -58,11 +58,7 @@
 #define TG3_VLAN_TAG_USED 0
 #define TG3_VLAN_TAG_USED 0
 #endif
-#ifdef NETIF_F_TSO
 #define TG3_TSO_SUPPORT	1
 #define TG3_TSO_SUPPORT	1
-#define TG3_TSO_SUPPORT	0
-#endif
 

 #include "tg3.h"
@@ -3873,7 +3869,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
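NETIF_F_TSO stopped being a conditional feature several releases earlier, so TG3_TSO_SUPPORT is now the constant 1 and every #if TG3_TSO_SUPPORT != 0 region below loses its guard mechanically, the body surviving verbatim:

	/* before: compiled only while TSO support was optional */
	#if TG3_TSO_SUPPORT != 0
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
			rdmac_mode |= (1 << 27);
	#endif

	/* after: guard dropped, body unchanged */
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
			rdmac_mode |= (1 << 27);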
 

 	entry = tp->tx_prod;
 	base_flags = 0;
 	mss = 0;
 	mss = 0;
 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
 	    (mss = skb_shinfo(skb)->gso_size) != 0) {
 	}
 	}
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-	mss = 0;
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#endif
 #if TG3_VLAN_TAG_USED
 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
@@ -3970,7 +3960,6 @@ out_unlock:
 	return NETDEV_TX_OK;
 }
 
-#if TG3_TSO_SUPPORT != 0
 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

 /* Use GSO to workaround a rare TSO bug that may be triggered when the
@@ -4002,7 +3991,6 @@ tg3_tso_bug_end:

 	return NETDEV_TX_OK;
 }
-#endif

 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
@@ -4036,7 +4024,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	base_flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#if TG3_TSO_SUPPORT != 0
 	mss = 0;
 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
 	    (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -4091,9 +4078,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			}
 		}
 	}
-#else
-	mss = 0;
-#endif
 #if TG3_VLAN_TAG_USED
 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
@@ -5329,7 +5313,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 	return 0;
 }
 
-#if TG3_TSO_SUPPORT != 0

 #define TG3_TSO_FW_RELEASE_MAJOR	0x1
 #define TG3_TSO_FW_RELASE_MINOR		0x6
@@ -5906,7 +5889,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
 	return 0;
 }
 
-#endif /* TG3_TSO_SUPPORT != 0 */

 /* tp->lock is held. */
 static void __tg3_set_mac_addr(struct tg3 *tp)
@@ -6120,7 +6102,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 	}
-#if TG3_TSO_SUPPORT != 0
 	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 		int fw_len;
 
@@ -6135,7 +6116,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(BUFMGR_MB_POOL_SIZE,
 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
 	}
-#endif

 	if (tp->dev->mtu <= ETH_DATA_LEN) {
 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
@@ -6337,10 +6317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
-#if TG3_TSO_SUPPORT != 0
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 		rdmac_mode |= (1 << 27);
-#endif

 	/* Receive/send statistics. */
 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -6511,10 +6489,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
 	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
-#if TG3_TSO_SUPPORT != 0
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
-#endif
 	tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
 
@@ -6524,13 +6500,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 			return err;
 	}
 
-#if TG3_TSO_SUPPORT != 0
 	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 		err = tg3_load_tso_firmware(tp);
 		if (err)
 			return err;
 	}
-#endif
 
 
 	tp->tx_mode = TX_MODE_ENABLE;
 	tp->tx_mode = TX_MODE_ENABLE;
 	tw32_f(MAC_TX_MODE, tp->tx_mode);
 	tw32_f(MAC_TX_MODE, tp->tx_mode);
@@ -8062,7 +8036,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
 	tp->msg_enable = value;
 }

-#if TG3_TSO_SUPPORT != 0
 static int tg3_set_tso(struct net_device *dev, u32 value)
 {
 	struct tg3 *tp = netdev_priv(dev);
@@ -8081,7 +8054,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
 	}
 	return ethtool_op_set_tso(dev, value);
 }
-#endif

 static int tg3_nway_reset(struct net_device *dev)
 {
@@ -9212,10 +9184,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
 	.set_tx_csum		= tg3_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
-#if TG3_TSO_SUPPORT != 0
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= tg3_set_tso,
-#endif
 	.self_test_count	= tg3_get_test_count,
 	.self_test		= tg3_self_test,
 	.get_strings		= tg3_get_strings,
@@ -11856,7 +11826,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,

 	tg3_init_bufmgr_config(tp);

-#if TG3_TSO_SUPPORT != 0
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
 	}
@@ -11881,7 +11850,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 			dev->features |= NETIF_F_TSO6;
 	}

-#endif

 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
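
All of the tg3 hunks above follow one pattern: the TG3_TSO_SUPPORT compile-time conditionals are dropped, so the TSO paths are always built and the choice moves entirely to the run-time capability bits the code already tests (TG3_FLG2_TSO_CAPABLE, TG3_FLG2_HW_TSO), including the ethtool get_tso/set_tso hooks that are now always registered. A minimal, self-contained sketch of that shape; the struct and flag names below are illustrative stand-ins, not the tg3 code itself:

#include <stdio.h>

#define FLG2_TSO_CAPABLE	(1u << 0)	/* stand-in for TG3_FLG2_TSO_CAPABLE */
#define FLG2_HW_TSO		(1u << 1)	/* stand-in for TG3_FLG2_HW_TSO */

struct nic {
	unsigned int flags2;			/* stand-in for tp->tg3_flags2 */
};

/* Always compiled in; the capability bits alone decide at run time. */
static void configure_tso(const struct nic *tp)
{
	if (tp->flags2 & FLG2_HW_TSO)
		printf("hardware TSO: segmentation done by the chip\n");
	else if (tp->flags2 & FLG2_TSO_CAPABLE)
		printf("firmware TSO: load the TSO firmware\n");
	else
		printf("no TSO: plain transmit path\n");
}

int main(void)
{
	struct nic tp = { .flags2 = FLG2_TSO_CAPABLE };

	configure_tso(&tp);
	return 0;
}

The payoff is that one kernel binary serves TSO and non-TSO chips alike, and dead compile-time branches such as the removed "mss = 0" fallback disappear.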

+ 10 - 13
drivers/net/ucc_geth.c

@@ -2865,8 +2865,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
 				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
 			ugeth->tx_bd_ring_offset[j] =
-				(u32) (kmalloc((u32) (length + align),
-				GFP_KERNEL));
+				kmalloc((u32) (length + align), GFP_KERNEL);
+
 			if (ugeth->tx_bd_ring_offset[j] != 0)
 				ugeth->p_tx_bd_ring[j] =
 					(void*)((ugeth->tx_bd_ring_offset[j] +
@@ -2901,7 +2901,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
 				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
 			ugeth->rx_bd_ring_offset[j] =
-			    (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
+				kmalloc((u32) (length + align), GFP_KERNEL);
 			if (ugeth->rx_bd_ring_offset[j] != 0)
 				ugeth->p_rx_bd_ring[j] =
 					(void*)((ugeth->rx_bd_ring_offset[j] +
@@ -2927,10 +2927,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	/* Init Tx bds */
 	for (j = 0; j < ug_info->numQueuesTx; j++) {
 		/* Setup the skbuff rings */
-		ugeth->tx_skbuff[j] =
-		    (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
-					       ugeth->ug_info->bdRingLenTx[j],
-					       GFP_KERNEL);
+		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+					      ugeth->ug_info->bdRingLenTx[j],
+					      GFP_KERNEL);

 		if (ugeth->tx_skbuff[j] == NULL) {
 			ugeth_err("%s: Could not allocate tx_skbuff",
@@ -2959,10 +2958,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	/* Init Rx bds */
 	for (j = 0; j < ug_info->numQueuesRx; j++) {
 		/* Setup the skbuff rings */
-		ugeth->rx_skbuff[j] =
-		    (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
-					       ugeth->ug_info->bdRingLenRx[j],
-					       GFP_KERNEL);
+		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+					      ugeth->ug_info->bdRingLenRx[j],
+					      GFP_KERNEL);

 		if (ugeth->rx_skbuff[j] == NULL) {
 			ugeth_err("%s: Could not allocate rx_skbuff",
@@ -3453,8 +3451,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	 * allocated resources can be released when the channel is freed.
 	 */
 	if (!(ugeth->p_init_enet_param_shadow =
-	     (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram),
-					      GFP_KERNEL))) {
+	      kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
 		ugeth_err
 		    ("%s: Can not allocate memory for"
 			" p_UccInitEnetParamShadows.", __FUNCTION__);

+ 13 - 11
drivers/net/wan/Kconfig

@@ -235,6 +235,19 @@ comment "Cyclades-PC300 MLPPP support is disabled."
 comment "Refer to the file README.mlppp, provided by PC300 package."
 comment "Refer to the file README.mlppp, provided by PC300 package."
 	depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
 	depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
 
 
+config PC300TOO
+	tristate "Cyclades PC300 RSV/X21 alternative support"
+	depends on HDLC && PCI
+	help
+	  Alternative driver for PC300 RSV/X21 PCI cards made by
+	  Cyclades, Inc. If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  To compile this as a module, choose M here: the module
+	  will be called pc300too.
+
+	  If unsure, say N here.
+
 config N2
 config N2
 	tristate "SDL RISCom/N2 support"
 	tristate "SDL RISCom/N2 support"
 	depends on HDLC && ISA
 	depends on HDLC && ISA
@@ -344,17 +357,6 @@ config DLCI
 	  To compile this driver as a module, choose M here: the
 	  module will be called dlci.

-config DLCI_COUNT
-	int "Max open DLCI"
-	depends on DLCI
-	default "24"
-	help
-	  Maximal number of logical point-to-point frame relay connections
-	  (the identifiers of which are called DCLIs) that the driver can
-	  handle.
-
-	  The default is probably fine.
-
 config DLCI_MAX
 	int "Max DLCI per device"
 	depends on DLCI

+ 1 - 0
drivers/net/wan/Makefile

@@ -41,6 +41,7 @@ obj-$(CONFIG_N2)		+= n2.o
 obj-$(CONFIG_C101)		+= c101.o
 obj-$(CONFIG_WANXL)		+= wanxl.o
 obj-$(CONFIG_PCI200SYN)		+= pci200syn.o
+obj-$(CONFIG_PC300TOO)		+= pc300too.o

 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:	$(obj)/wanxlfw.inc
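
The Kconfig and Makefile hunks are the standard two-step wiring for a new driver: a tristate symbol lets CONFIG_PC300TOO be y (built in), m (module), or n, and the obj-$(CONFIG_PC300TOO) += pc300too.o rule expands to obj-y or obj-m accordingly, so the same object either links into vmlinux or becomes pc300too.ko. A bare-bones sketch of the entry points such a module needs; this is generic kernel-module boilerplate, not the actual pc300too driver, and the _sketch names are made up:

#include <linux/init.h>
#include <linux/module.h>

static int __init pc300too_sketch_init(void)
{
	/* A real driver would register its pci_driver here. */
	return 0;
}

static void __exit pc300too_sketch_exit(void)
{
	/* ...and unregister it here when built as a module. */
}

module_init(pc300too_sketch_init);
module_exit(pc300too_sketch_exit);
MODULE_LICENSE("GPL");

The hunk deleting the unused DLCI_COUNT option is the usual counterpart: a config symbol nothing in the code reads any more is removed outright rather than left behind to clutter configuration.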

Some files were not shown because too many files changed in this diff