소스 검색

Merge branch 'master' of /repos/git/net-next-2.6

Conflicts:
	include/net/netfilter/xt_rateest.h
	net/bridge/br_netfilter.c
	net/netfilter/nf_conntrack_core.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Patrick McHardy 15 년 전
부모
커밋
f9181f4ffc
100개의 변경된 파일3130개의 추가작업 그리고 1669개의 파일을 삭제
  1. 2 0
      Documentation/filesystems/nfs/nfsroot.txt
  2. 82 2
      Documentation/networking/bonding.txt
  3. 26 0
      Documentation/networking/packet_mmap.txt
  4. 5 0
      Documentation/networking/pktgen.txt
  5. 1 4
      MAINTAINERS
  6. 1 1
      drivers/infiniband/core/addr.c
  7. 2 2
      drivers/infiniband/hw/cxgb3/iwch_cm.c
  8. 2 2
      drivers/infiniband/hw/cxgb4/cm.c
  9. 1 1
      drivers/infiniband/hw/nes/nes_cm.c
  10. 3 3
      drivers/isdn/capi/kcapi.c
  11. 2 2
      drivers/isdn/hardware/mISDN/netjet.c
  12. 3 3
      drivers/net/3c527.h
  13. 2 2
      drivers/net/8139cp.c
  14. 2 1
      drivers/net/8139too.c
  15. 1 0
      drivers/net/Kconfig
  16. 11 0
      drivers/net/arm/ixp4xx_eth.c
  17. 7 2
      drivers/net/atl1c/atl1c.h
  18. 92 15
      drivers/net/atl1c/atl1c_hw.c
  19. 46 3
      drivers/net/atl1c/atl1c_hw.h
  20. 200 148
      drivers/net/atl1c/atl1c_main.c
  21. 2 2
      drivers/net/atlx/atl1.h
  22. 70 74
      drivers/net/b44.c
  23. 6 7
      drivers/net/benet/be_cmds.c
  24. 49 46
      drivers/net/benet/be_main.c
  25. 41 16
      drivers/net/bnx2.c
  26. 5 3
      drivers/net/bnx2x_link.c
  27. 13 20
      drivers/net/bonding/bond_alb.c
  28. 114 16
      drivers/net/bonding/bond_main.c
  29. 193 86
      drivers/net/bonding/bond_sysfs.c
  30. 10 4
      drivers/net/bonding/bonding.h
  31. 9 2
      drivers/net/caif/caif_serial.c
  32. 5 5
      drivers/net/can/mscan/mpc5xxx_can.c
  33. 1 1
      drivers/net/can/mscan/mscan.h
  34. 1 1
      drivers/net/can/usb/ems_usb.c
  35. 0 1
      drivers/net/chelsio/common.h
  36. 2 47
      drivers/net/chelsio/subr.c
  37. 1 1
      drivers/net/cnic.c
  38. 1 1
      drivers/net/dm9000.c
  39. 8 9
      drivers/net/e1000/e1000_main.c
  40. 1 1
      drivers/net/e1000e/netdev.c
  41. 1 1
      drivers/net/ehea/ehea_qmr.h
  42. 7 0
      drivers/net/enic/enic.h
  43. 97 103
      drivers/net/enic/enic_main.c
  44. 1 1
      drivers/net/enic/vnic_dev.c
  45. 3 2
      drivers/net/enic/vnic_vic.c
  46. 1 1
      drivers/net/enic/vnic_vic.h
  47. 4 3
      drivers/net/epic100.c
  48. 44 59
      drivers/net/ethoc.c
  49. 34 22
      drivers/net/fec.c
  50. 5 19
      drivers/net/fec_mpc52xx_phy.c
  51. 1 1
      drivers/net/fsl_pq_mdio.h
  52. 5 6
      drivers/net/gianfar.c
  53. 5 7
      drivers/net/greth.c
  54. 1 1
      drivers/net/irda/donauboe.h
  55. 1 1
      drivers/net/irda/irda-usb.h
  56. 1 1
      drivers/net/irda/ks959-sir.c
  57. 1 1
      drivers/net/irda/ksdazzle-sir.c
  58. 3 3
      drivers/net/irda/vlsi_ir.h
  59. 3 5
      drivers/net/ixgbe/ixgbe.h
  60. 2 3
      drivers/net/ixgbe/ixgbe_82599.c
  61. 2 0
      drivers/net/ixgbe/ixgbe_common.c
  62. 20 6
      drivers/net/ixgbe/ixgbe_common.h
  63. 1 1
      drivers/net/ixgbe/ixgbe_dcb_nl.c
  64. 24 48
      drivers/net/ixgbe/ixgbe_ethtool.c
  65. 16 19
      drivers/net/ixgbe/ixgbe_fcoe.c
  66. 133 154
      drivers/net/ixgbe/ixgbe_main.c
  67. 5 10
      drivers/net/ixgbe/ixgbe_sriov.c
  68. 1 0
      drivers/net/ixgbe/ixgbe_type.h
  69. 1 0
      drivers/net/ixgbevf/ixgbevf_main.c
  70. 15 17
      drivers/net/korina.c
  71. 2 1
      drivers/net/ksz884x.c
  72. 51 10
      drivers/net/loopback.c
  73. 29 28
      drivers/net/mac8390.c
  74. 23 8
      drivers/net/macvlan.c
  75. 7 7
      drivers/net/mlx4/eq.c
  76. 1 1
      drivers/net/mlx4/mr.c
  77. 50 1
      drivers/net/phy/lxt.c
  78. 4 4
      drivers/net/ppp_generic.c
  79. 1 2
      drivers/net/pppoe.c
  80. 5 5
      drivers/net/ps3_gelic_wireless.h
  81. 110 10
      drivers/net/qlcnic/qlcnic.h
  82. 494 19
      drivers/net/qlcnic/qlcnic_ctx.c
  83. 6 5
      drivers/net/qlcnic/qlcnic_ethtool.c
  84. 79 5
      drivers/net/qlcnic/qlcnic_hdr.h
  85. 4 10
      drivers/net/qlcnic/qlcnic_hw.c
  86. 30 10
      drivers/net/qlcnic/qlcnic_init.c
  87. 284 28
      drivers/net/qlcnic/qlcnic_main.c
  88. 12 12
      drivers/net/qlge/qlge.h
  89. 146 156
      drivers/net/r6040.c
  90. 11 0
      drivers/net/r8169.c
  91. 29 46
      drivers/net/sfc/efx.c
  92. 2 2
      drivers/net/sfc/efx.h
  93. 5 3
      drivers/net/sfc/falcon.c
  94. 11 10
      drivers/net/sfc/mcdi_phy.c
  95. 26 20
      drivers/net/sfc/net_driver.h
  96. 42 13
      drivers/net/sfc/nic.c
  97. 2 2
      drivers/net/sfc/nic.h
  98. 186 207
      drivers/net/sfc/rx.c
  99. 13 15
      drivers/net/sfc/selftest.c
  100. 4 0
      drivers/net/sfc/siena.c

+ 2 - 0
Documentation/filesystems/nfs/nfsroot.txt

@@ -124,6 +124,8 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
 
 
   <hostname>	Name of the client. May be supplied by autoconfiguration,
   <hostname>	Name of the client. May be supplied by autoconfiguration,
   		but its absence will not trigger autoconfiguration.
   		but its absence will not trigger autoconfiguration.
+		If specified and DHCP is used, the user provided hostname will
+		be carried in the DHCP request to hopefully update DNS record.
 
 
   		Default: Client IP address is used in ASCII notation.
   		Default: Client IP address is used in ASCII notation.
 
 

+ 82 - 2
Documentation/networking/bonding.txt

@@ -49,6 +49,7 @@ Table of Contents
 3.3	Configuring Bonding Manually with Ifenslave
 3.3	Configuring Bonding Manually with Ifenslave
 3.3.1		Configuring Multiple Bonds Manually
 3.3.1		Configuring Multiple Bonds Manually
 3.4	Configuring Bonding Manually via Sysfs
 3.4	Configuring Bonding Manually via Sysfs
+3.5	Overriding Configuration for Special Cases
 
 
 4. Querying Bonding Configuration
 4. Querying Bonding Configuration
 4.1	Bonding Configuration
 4.1	Bonding Configuration
@@ -1318,8 +1319,87 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 
 
-
-4. Querying Bonding Configuration 
+3.5 Overriding Configuration for Special Cases
+----------------------------------------------
+When using the bonding driver, the physical port which transmits a frame is
+typically selected by the bonding driver, and is not relevant to the user or
+system administrator.  The output port is simply selected using the policies of
+the selected bonding mode.  On occasion however, it is helpful to direct certain
+classes of traffic to certain physical interfaces on output to implement
+slightly more complex policies.  For example, to reach a web server over a
+bonded interface in which eth0 connects to a private network, while eth1
+connects via a public network, it may be desirous to bias the bond to send said
+traffic over eth0 first, using eth1 only as a fall back, while all other traffic
+can safely be sent over either interface.  Such configurations may be achieved
+using the traffic control utilities inherent in linux.
+
+By default the bonding driver is multiqueue aware and 16 queues are created
+when the driver initializes (see Documentation/networking/multiqueue.txt
+for details).  If more or less queues are desired the module parameter
+tx_queues can be used to change this value.  There is no sysfs parameter
+available as the allocation is done at module init time.
+
+The output of the file /proc/net/bonding/bondX has changed so the output Queue
+ID is now printed for each slave:
+
+Bonding Mode: fault-tolerance (active-backup)
+Primary Slave: None
+Currently Active Slave: eth0
+MII Status: up
+MII Polling Interval (ms): 0
+Up Delay (ms): 0
+Down Delay (ms): 0
+
+Slave Interface: eth0
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cb
+Slave queue ID: 0
+
+Slave Interface: eth1
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cc
+Slave queue ID: 2
+
+The queue_id for a slave can be set using the command:
+
+# echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id
+
+Any interface that needs a queue_id set should set it with multiple calls
+like the one above until proper priorities are set for all interfaces.  On
+distributions that allow configuration via initscripts, multiple 'queue_id'
+arguments can be added to BONDING_OPTS to set all needed slave queues.
+
+These queue id's can be used in conjunction with the tc utility to configure
+a multiqueue qdisc and filters to bias certain traffic to transmit on certain
+slave devices.  For instance, say we wanted, in the above configuration to
+force all traffic bound to 192.168.1.100 to use eth1 in the bond as its output
+device. The following commands would accomplish this:
+
+# tc qdisc add dev bond0 handle 1 root multiq
+
+# tc filter add dev bond0 protocol ip parent 1: prio 1 u32 match ip dst \
+	192.168.1.100 action skbedit queue_mapping 2
+
+These commands tell the kernel to attach a multiqueue queue discipline to the
+bond0 interface and filter traffic enqueued to it, such that packets with a dst
+ip of 192.168.1.100 have their output queue mapping value overwritten to 2.
+This value is then passed into the driver, causing the normal output path
+selection policy to be overridden, selecting instead qid 2, which maps to eth1.
+
+Note that qid values begin at 1.  Qid 0 is reserved to initiate to the driver
+that normal output policy selection should take place.  One benefit to simply
+leaving the qid for a slave to 0 is the multiqueue awareness in the bonding
+driver that is now present.  This awareness allows tc filters to be placed on
+slave devices as well as bond devices and the bonding driver will simply act as
+a pass-through for selecting output queues on the slave device rather than 
+output port selection.
+
+This feature first appeared in bonding driver version 3.7.0 and support for
+output slave selection was limited to round-robin and active-backup modes.
+
+4 Querying Bonding Configuration
 =================================
 =================================
 
 
 4.1 Bonding Configuration
 4.1 Bonding Configuration

+ 26 - 0
Documentation/networking/packet_mmap.txt

@@ -493,6 +493,32 @@ The user can also use poll() to check if a buffer is available:
     pfd.events = POLLOUT;
     pfd.events = POLLOUT;
     retval = poll(&pfd, 1, timeout);
     retval = poll(&pfd, 1, timeout);
 
 
+-------------------------------------------------------------------------------
++ PACKET_TIMESTAMP
+-------------------------------------------------------------------------------
+
+The PACKET_TIMESTAMP setting determines the source of the timestamp in
+the packet meta information.  If your NIC is capable of timestamping
+packets in hardware, you can request those hardware timestamps to used.
+Note: you may need to enable the generation of hardware timestamps with
+SIOCSHWTSTAMP.
+
+PACKET_TIMESTAMP accepts the same integer bit field as
+SO_TIMESTAMPING.  However, only the SOF_TIMESTAMPING_SYS_HARDWARE
+and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by
+PACKET_TIMESTAMP.  SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over
+SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set.
+
+    int req = 0;
+    req |= SOF_TIMESTAMPING_SYS_HARDWARE;
+    setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req))
+
+If PACKET_TIMESTAMP is not set, a software timestamp generated inside
+the networking stack is used (the behavior before this setting was added).
+
+See include/linux/net_tstamp.h and Documentation/networking/timestamping
+for more information on hardware timestamps.
+
 --------------------------------------------------------------------------------
 --------------------------------------------------------------------------------
 + THANKS
 + THANKS
 --------------------------------------------------------------------------------
 --------------------------------------------------------------------------------

+ 5 - 0
Documentation/networking/pktgen.txt

@@ -151,6 +151,8 @@ Examples:
 
 
  pgset stop    	          aborts injection. Also, ^C aborts generator.
  pgset stop    	          aborts injection. Also, ^C aborts generator.
 
 
+ pgset "rate 300M"        set rate to 300 Mb/s
+ pgset "ratep 1000000"    set rate to 1Mpps
 
 
 Example scripts
 Example scripts
 ===============
 ===============
@@ -241,6 +243,9 @@ src6
 flows
 flows
 flowlen
 flowlen
 
 
+rate
+ratep
+
 References:
 References:
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/

+ 1 - 4
MAINTAINERS

@@ -2978,7 +2978,6 @@ F:	drivers/net/ixgb/
 F:	drivers/net/ixgbe/
 F:	drivers/net/ixgbe/
 
 
 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
-M:	Zhu Yi <yi.zhu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-wireless@vger.kernel.org
@@ -2988,7 +2987,6 @@ F:	Documentation/networking/README.ipw2100
 F:	drivers/net/wireless/ipw2x00/ipw2100.*
 F:	drivers/net/wireless/ipw2x00/ipw2100.*
 
 
 INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
 INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
-M:	Zhu Yi <yi.zhu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-wireless@vger.kernel.org
@@ -3019,8 +3017,8 @@ F:	drivers/net/wimax/i2400m/
 F:	include/linux/wimax/i2400m.h
 F:	include/linux/wimax/i2400m.h
 
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:	Zhu Yi <yi.zhu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
+M:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-wireless@vger.kernel.org
 W:	http://intellinuxwireless.org
 W:	http://intellinuxwireless.org
@@ -3030,7 +3028,6 @@ F:	drivers/net/wireless/iwlwifi/
 
 
 INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
 INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
 M:	Samuel Ortiz <samuel.ortiz@intel.com>
 M:	Samuel Ortiz <samuel.ortiz@intel.com>
-M:	Zhu Yi <yi.zhu@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 S:	Supported

+ 1 - 1
drivers/infiniband/core/addr.c

@@ -215,7 +215,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
 
 	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
 	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-		neigh_event_send(rt->u.dst.neighbour, NULL);
+		neigh_event_send(rt->dst.neighbour, NULL);
 		ret = -ENODATA;
 		ret = -ENODATA;
 		if (neigh)
 		if (neigh)
 			goto release;
 			goto release;

+ 2 - 2
drivers/infiniband/hw/cxgb3/iwch_cm.c

@@ -1364,7 +1364,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		       __func__);
 		       __func__);
 		goto reject;
 		goto reject;
 	}
 	}
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
@@ -1932,7 +1932,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -EHOSTUNREACH;
 		err = -EHOSTUNREACH;
 		goto fail3;
 		goto fail3;
 	}
 	}
-	ep->dst = &rt->u.dst;
+	ep->dst = &rt->dst;
 
 
 	/* get a l2t entry */
 	/* get a l2t entry */
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,

+ 2 - 2
drivers/infiniband/hw/cxgb4/cm.c

@@ -1364,7 +1364,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		       __func__);
 		       __func__);
 		goto reject;
 		goto reject;
 	}
 	}
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
 	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
 		pdev = ip_dev_find(&init_net, peer_ip);
 		pdev = ip_dev_find(&init_net, peer_ip);
 		BUG_ON(!pdev);
 		BUG_ON(!pdev);
@@ -1938,7 +1938,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -EHOSTUNREACH;
 		err = -EHOSTUNREACH;
 		goto fail3;
 		goto fail3;
 	}
 	}
-	ep->dst = &rt->u.dst;
+	ep->dst = &rt->dst;
 
 
 	/* get a l2t entry */
 	/* get a l2t entry */
 	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
 	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {

+ 1 - 1
drivers/infiniband/hw/nes/nes_cm.c

@@ -1146,7 +1146,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 	}
 	}
 
 
 	if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
 	if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
-		neigh_event_send(rt->u.dst.neighbour, NULL);
+		neigh_event_send(rt->dst.neighbour, NULL);
 
 
 	ip_rt_put(rt);
 	ip_rt_put(rt);
 	return rc;
 	return rc;

+ 3 - 3
drivers/isdn/capi/kcapi.c

@@ -1020,12 +1020,12 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
 		if (cmd == AVMB1_ADDCARD) {
 		if (cmd == AVMB1_ADDCARD) {
 		   if ((retval = copy_from_user(&cdef, data,
 		   if ((retval = copy_from_user(&cdef, data,
 					    sizeof(avmb1_carddef))))
 					    sizeof(avmb1_carddef))))
-			   return retval;
+			   return -EFAULT;
 		   cdef.cardtype = AVM_CARDTYPE_B1;
 		   cdef.cardtype = AVM_CARDTYPE_B1;
 		} else {
 		} else {
 		   if ((retval = copy_from_user(&cdef, data,
 		   if ((retval = copy_from_user(&cdef, data,
 					    sizeof(avmb1_extcarddef))))
 					    sizeof(avmb1_extcarddef))))
-			   return retval;
+			   return -EFAULT;
 		}
 		}
 		cparams.port = cdef.port;
 		cparams.port = cdef.port;
 		cparams.irq = cdef.irq;
 		cparams.irq = cdef.irq;
@@ -1218,7 +1218,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
 		kcapi_carddef cdef;
 		kcapi_carddef cdef;
 
 
 		if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
 		if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
-			return retval;
+			return -EFAULT;
 
 
 		cparams.port = cdef.port;
 		cparams.port = cdef.port;
 		cparams.irq = cdef.irq;
 		cparams.irq = cdef.irq;

+ 2 - 2
drivers/isdn/hardware/mISDN/netjet.c

@@ -320,12 +320,12 @@ inittiger(struct tiger_hw *card)
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
 	for (i = 0; i < 2; i++) {
 	for (i = 0; i < 2; i++) {
-		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL);
+		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
 		if (!card->bc[i].hsbuf) {
 		if (!card->bc[i].hsbuf) {
 			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
 			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
 			return -ENOMEM;
 			return -ENOMEM;
 		}
 		}
-		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL);
+		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
 		if (!card->bc[i].hrbuf) {
 		if (!card->bc[i].hrbuf) {
 			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
 			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
 			return -ENOMEM;
 			return -ENOMEM;

+ 3 - 3
drivers/net/3c527.h

@@ -34,7 +34,7 @@ struct mc32_mailbox
 {
 {
  	u16 mbox;
  	u16 mbox;
  	u16 data[1];
  	u16 data[1];
-} __attribute((packed));
+} __packed;
 
 
 struct skb_header
 struct skb_header
 {
 {
@@ -43,7 +43,7 @@ struct skb_header
 	u16 next;	/* Do not change! */
 	u16 next;	/* Do not change! */
 	u16 length;
 	u16 length;
 	u32 data;
 	u32 data;
-} __attribute((packed));
+} __packed;
 
 
 struct mc32_stats
 struct mc32_stats
 {
 {
@@ -68,7 +68,7 @@ struct mc32_stats
 	u32 dataA[6];
 	u32 dataA[6];
 	u16 dataB[5];
 	u16 dataB[5];
 	u32 dataC[14];
 	u32 dataC[14];
-} __attribute((packed));
+} __packed;
 
 
 #define STATUS_MASK	0x0F
 #define STATUS_MASK	0x0F
 #define COMPLETED	(1<<7)
 #define COMPLETED	(1<<7)

+ 2 - 2
drivers/net/8139cp.c

@@ -322,7 +322,7 @@ struct cp_dma_stats {
 	__le32			rx_ok_mcast;
 	__le32			rx_ok_mcast;
 	__le16			tx_abort;
 	__le16			tx_abort;
 	__le16			tx_underrun;
 	__le16			tx_underrun;
-} __attribute__((packed));
+} __packed;
 
 
 struct cp_extra_stats {
 struct cp_extra_stats {
 	unsigned long		rx_frags;
 	unsigned long		rx_frags;
@@ -598,8 +598,8 @@ rx_next:
 			goto rx_status_loop;
 			goto rx_status_loop;
 
 
 		spin_lock_irqsave(&cp->lock, flags);
 		spin_lock_irqsave(&cp->lock, flags);
-		cpw16_f(IntrMask, cp_intr_mask);
 		__napi_complete(napi);
 		__napi_complete(napi);
+		cpw16_f(IntrMask, cp_intr_mask);
 		spin_unlock_irqrestore(&cp->lock, flags);
 		spin_unlock_irqrestore(&cp->lock, flags);
 	}
 	}
 
 

+ 2 - 1
drivers/net/8139too.c

@@ -860,6 +860,7 @@ retry:
 		}
 		}
 
 
 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
+	i = 0;
 	dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
 	dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
 	dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
 	dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
 	tp->chipset = 0;
 	tp->chipset = 0;
@@ -2088,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 		 * again when we think we are done.
 		 * again when we think we are done.
 		 */
 		 */
 		spin_lock_irqsave(&tp->lock, flags);
 		spin_lock_irqsave(&tp->lock, flags);
-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		__napi_complete(napi);
 		__napi_complete(napi);
+		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 	}
 	spin_unlock(&tp->rx_lock);
 	spin_unlock(&tp->rx_lock);

+ 1 - 0
drivers/net/Kconfig

@@ -1659,6 +1659,7 @@ config R6040
 	depends on NET_PCI && PCI
 	depends on NET_PCI && PCI
 	select CRC32
 	select CRC32
 	select MII
 	select MII
+	select PHYLIB
 	help
 	help
 	  This is a driver for the R6040 Fast Ethernet MACs found in the
 	  This is a driver for the R6040 Fast Ethernet MACs found in the
 	  the RDC R-321x System-on-chips.
 	  the RDC R-321x System-on-chips.

+ 11 - 0
drivers/net/arm/ixp4xx_eth.c

@@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev)
 	struct netdev_hw_addr *ha;
 	struct netdev_hw_addr *ha;
 	u8 diffs[ETH_ALEN], *addr;
 	u8 diffs[ETH_ALEN], *addr;
 	int i;
 	int i;
+	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+	if (dev->flags & IFF_ALLMULTI) {
+		for (i = 0; i < ETH_ALEN; i++) {
+			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
+			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
+		}
+		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+			&port->regs->rx_control[0]);
+		return;
+	}
 
 
 	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
 	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
 		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
 		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,

+ 7 - 2
drivers/net/atl1c/atl1c.h

@@ -73,7 +73,8 @@
 #define FULL_DUPLEX        2
 #define FULL_DUPLEX        2
 
 
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MAX_JUMBO_FRAME_SIZE 	(9*1024)
+#define MAX_JUMBO_FRAME_SIZE	(6*1024)
+#define MAX_TSO_FRAME_SIZE      (7*1024)
 #define MAX_TX_OFFLOAD_THRESH	(9*1024)
 #define MAX_TX_OFFLOAD_THRESH	(9*1024)
 
 
 #define AT_MAX_RECEIVE_QUEUE    4
 #define AT_MAX_RECEIVE_QUEUE    4
@@ -87,10 +88,11 @@
 #define AT_MAX_INT_WORK		5
 #define AT_MAX_INT_WORK		5
 #define AT_TWSI_EEPROM_TIMEOUT 	100
 #define AT_TWSI_EEPROM_TIMEOUT 	100
 #define AT_HW_MAX_IDLE_DELAY 	10
 #define AT_HW_MAX_IDLE_DELAY 	10
-#define AT_SUSPEND_LINK_TIMEOUT 28
+#define AT_SUSPEND_LINK_TIMEOUT 100
 
 
 #define AT_ASPM_L0S_TIMER	6
 #define AT_ASPM_L0S_TIMER	6
 #define AT_ASPM_L1_TIMER	12
 #define AT_ASPM_L1_TIMER	12
+#define AT_LCKDET_TIMER		12
 
 
 #define ATL1C_PCIE_L0S_L1_DISABLE 	0x01
 #define ATL1C_PCIE_L0S_L1_DISABLE 	0x01
 #define ATL1C_PCIE_PHY_RESET		0x02
 #define ATL1C_PCIE_PHY_RESET		0x02
@@ -316,6 +318,7 @@ enum atl1c_nic_type {
 	athr_l2c_b,
 	athr_l2c_b,
 	athr_l2c_b2,
 	athr_l2c_b2,
 	athr_l1d,
 	athr_l1d,
+	athr_l1d_2,
 };
 };
 
 
 enum atl1c_trans_queue {
 enum atl1c_trans_queue {
@@ -392,6 +395,8 @@ struct atl1c_hw {
 	u16 subsystem_id;
 	u16 subsystem_id;
 	u16 subsystem_vendor_id;
 	u16 subsystem_vendor_id;
 	u8 revision_id;
 	u8 revision_id;
+	u16 phy_id1;
+	u16 phy_id2;
 
 
 	u32 intr_mask;
 	u32 intr_mask;
 	u8 dmaw_dly_cnt;
 	u8 dmaw_dly_cnt;

+ 92 - 15
drivers/net/atl1c/atl1c_hw.c

@@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
 	if (data & TWSI_DEBUG_DEV_EXIST)
 	if (data & TWSI_DEBUG_DEV_EXIST)
 		return 1;
 		return 1;
 
 
+	AT_READ_REG(hw, REG_MASTER_CTRL, &data);
+	if (data & MASTER_CTRL_OTP_SEL)
+		return 1;
 	return 0;
 	return 0;
 }
 }
 
 
@@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 	u32 i;
 	u32 i;
 	u32 otp_ctrl_data;
 	u32 otp_ctrl_data;
 	u32 twsi_ctrl_data;
 	u32 twsi_ctrl_data;
+	u32 ltssm_ctrl_data;
+	u32 wol_data;
 	u8  eth_addr[ETH_ALEN];
 	u8  eth_addr[ETH_ALEN];
 	u16 phy_data;
 	u16 phy_data;
 	bool raise_vol = false;
 	bool raise_vol = false;
@@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 			udelay(20);
 			udelay(20);
 			raise_vol = true;
 			raise_vol = true;
 		}
 		}
+		/* close open bit of ReadOnly*/
+		AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
+		ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
+		AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
+
+		/* clear any WOL settings */
+		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+		AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
+
 
 
 		AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
 		AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
 		twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
 		twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 	}
 	}
 	/* Disable OTP_CLK */
 	/* Disable OTP_CLK */
 	if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
 	if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
-		if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
-			otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
-			AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
-			AT_WRITE_FLUSH(hw);
-			msleep(1);
-		}
+		otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+		AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+		msleep(1);
 	}
 	}
 	if (raise_vol) {
 	if (raise_vol) {
 		if (hw->nic_type == athr_l2c_b ||
 		if (hw->nic_type == athr_l2c_b ||
 		    hw->nic_type == athr_l2c_b2 ||
 		    hw->nic_type == athr_l2c_b2 ||
-		    hw->nic_type == athr_l1d) {
+		    hw->nic_type == athr_l1d ||
+		    hw->nic_type == athr_l1d_2) {
 			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
 			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
 			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
 			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
 				goto out;
 				goto out;
@@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
 
 
 	if (hw->nic_type == athr_l2c_b ||
 	if (hw->nic_type == athr_l2c_b ||
 	    hw->nic_type == athr_l2c_b2 ||
 	    hw->nic_type == athr_l2c_b2 ||
-	    hw->nic_type == athr_l1d) {
+	    hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l1d_2) {
 		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
 		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
 		atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
 		atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
 		atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
 		atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
 		msleep(20);
 		msleep(20);
 	}
 	}
-
-	/*Enable PHY LinkChange Interrupt */
+	if (hw->nic_type == athr_l1d) {
+		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
+	}
+	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
+		|| hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
+		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+	}
 	err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
 	err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
 	if (err) {
 	if (err) {
 		if (netif_msg_hw(adapter))
 		if (netif_msg_hw(adapter))
@@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw)
 	struct pci_dev *pdev = adapter->pdev;
 	struct pci_dev *pdev = adapter->pdev;
 	int ret_val;
 	int ret_val;
 	u16 mii_bmcr_data = BMCR_RESET;
 	u16 mii_bmcr_data = BMCR_RESET;
-	u16 phy_id1, phy_id2;
 
 
-	if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) ||
-		(atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) {
-			if (netif_msg_link(adapter))
-				dev_err(&pdev->dev, "Error get phy ID\n");
+	if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
+		(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
+		dev_err(&pdev->dev, "Error get phy ID\n");
 		return -1;
 		return -1;
 	}
 	}
 	switch (hw->media_type) {
 	switch (hw->media_type) {
@@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
 	return 0;
 	return 0;
 }
 }
 
 
+int atl1c_phy_power_saving(struct atl1c_hw *hw)
+{
+	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+	struct pci_dev *pdev = adapter->pdev;
+	int ret = 0;
+	u16 autoneg_advertised = ADVERTISED_10baseT_Half;
+	u16 save_autoneg_advertised;
+	u16 phy_data;
+	u16 mii_lpa_data;
+	u16 speed = SPEED_0;
+	u16 duplex = FULL_DUPLEX;
+	int i;
+
+	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+	if (phy_data & BMSR_LSTATUS) {
+		atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
+		if (mii_lpa_data & LPA_10FULL)
+			autoneg_advertised = ADVERTISED_10baseT_Full;
+		else if (mii_lpa_data & LPA_10HALF)
+			autoneg_advertised = ADVERTISED_10baseT_Half;
+		else if (mii_lpa_data & LPA_100HALF)
+			autoneg_advertised = ADVERTISED_100baseT_Half;
+		else if (mii_lpa_data & LPA_100FULL)
+			autoneg_advertised = ADVERTISED_100baseT_Full;
+
+		save_autoneg_advertised = hw->autoneg_advertised;
+		hw->phy_configured = false;
+		hw->autoneg_advertised = autoneg_advertised;
+		if (atl1c_restart_autoneg(hw) != 0) {
+			dev_dbg(&pdev->dev, "phy autoneg failed\n");
+			ret = -1;
+		}
+		hw->autoneg_advertised = save_autoneg_advertised;
+
+		if (mii_lpa_data) {
+			for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+				mdelay(100);
+				atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+				atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+				if (phy_data & BMSR_LSTATUS) {
+					if (atl1c_get_speed_and_duplex(hw, &speed,
+									&duplex) != 0)
+						dev_dbg(&pdev->dev,
+							"get speed and duplex failed\n");
+					break;
+				}
+			}
+		}
+	} else {
+		speed = SPEED_10;
+		duplex = HALF_DUPLEX;
+	}
+	adapter->link_speed = speed;
+	adapter->link_duplex = duplex;
+
+	return ret;
+}
+
 int atl1c_restart_autoneg(struct atl1c_hw *hw)
 int atl1c_restart_autoneg(struct atl1c_hw *hw)
 {
 {
 	int err = 0;
 	int err = 0;

+ 46 - 3
drivers/net/atl1c/atl1c_hw.h

@@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
 int atl1c_phy_init(struct atl1c_hw *hw);
 int atl1c_phy_init(struct atl1c_hw *hw);
 int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
 int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
 int atl1c_restart_autoneg(struct atl1c_hw *hw);
 int atl1c_restart_autoneg(struct atl1c_hw *hw);
-
+int atl1c_phy_power_saving(struct atl1c_hw *hw);
 /* register definition */
 /* register definition */
 #define REG_DEVICE_CAP              	0x5C
 #define REG_DEVICE_CAP              	0x5C
 #define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
 #define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
@@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 #define REG_PCIE_PHYMISC	    	0x1000
 #define REG_PCIE_PHYMISC	    	0x1000
 #define PCIE_PHYMISC_FORCE_RCV_DET	0x4
 #define PCIE_PHYMISC_FORCE_RCV_DET	0x4
 
 
+#define REG_PCIE_PHYMISC2		0x1004
+#define PCIE_PHYMISC2_SERDES_CDR_MASK	0x3
+#define PCIE_PHYMISC2_SERDES_CDR_SHIFT	16
+#define PCIE_PHYMISC2_SERDES_TH_MASK	0x3
+#define PCIE_PHYMISC2_SERDES_TH_SHIFT	18
+
 #define REG_TWSI_DEBUG			0x1108
 #define REG_TWSI_DEBUG			0x1108
 #define TWSI_DEBUG_DEV_EXIST		0x20000000
 #define TWSI_DEBUG_DEV_EXIST		0x20000000
 
 
@@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 #define PM_CTRL_ASPM_L0S_EN		0x00001000
 #define PM_CTRL_ASPM_L0S_EN		0x00001000
 #define PM_CTRL_CLK_SWH_L1		0x00002000
 #define PM_CTRL_CLK_SWH_L1		0x00002000
 #define PM_CTRL_CLK_PWM_VER1_1		0x00004000
 #define PM_CTRL_CLK_PWM_VER1_1		0x00004000
-#define PM_CTRL_PCIE_RECV		0x00008000
+#define PM_CTRL_RCVR_WT_TIMER		0x00008000
 #define PM_CTRL_L1_ENTRY_TIMER_MASK	0xF
 #define PM_CTRL_L1_ENTRY_TIMER_MASK	0xF
 #define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
 #define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
 #define PM_CTRL_PM_REQ_TIMER_MASK	0xF
 #define PM_CTRL_PM_REQ_TIMER_MASK	0xF
 #define PM_CTRL_PM_REQ_TIMER_SHIFT	20
 #define PM_CTRL_PM_REQ_TIMER_SHIFT	20
-#define PM_CTRL_LCKDET_TIMER_MASK	0x3F
+#define PM_CTRL_LCKDET_TIMER_MASK	0xF
 #define PM_CTRL_LCKDET_TIMER_SHIFT	24
 #define PM_CTRL_LCKDET_TIMER_SHIFT	24
 #define PM_CTRL_EN_BUFS_RX_L0S		0x10000000
 #define PM_CTRL_EN_BUFS_RX_L0S		0x10000000
 #define PM_CTRL_SA_DLY_EN		0x20000000
 #define PM_CTRL_SA_DLY_EN		0x20000000
 #define PM_CTRL_MAC_ASPM_CHK		0x40000000
 #define PM_CTRL_MAC_ASPM_CHK		0x40000000
 #define PM_CTRL_HOTRST			0x80000000
 #define PM_CTRL_HOTRST			0x80000000
 
 
+#define REG_LTSSM_ID_CTRL		0x12FC
+#define LTSSM_ID_EN_WRO			0x1000
 /* Selene Master Control Register */
 /* Selene Master Control Register */
 #define REG_MASTER_CTRL			0x1400
 #define REG_MASTER_CTRL			0x1400
 #define MASTER_CTRL_SOFT_RST            0x1
 #define MASTER_CTRL_SOFT_RST            0x1
 #define MASTER_CTRL_TEST_MODE_MASK	0x3
 #define MASTER_CTRL_TEST_MODE_MASK	0x3
 #define MASTER_CTRL_TEST_MODE_SHIFT	2
 #define MASTER_CTRL_TEST_MODE_SHIFT	2
 #define MASTER_CTRL_BERT_START		0x10
 #define MASTER_CTRL_BERT_START		0x10
+#define MASTER_CTRL_OOB_DIS_OFF		0x40
+#define MASTER_CTRL_SA_TIMER_EN		0x80
 #define MASTER_CTRL_MTIMER_EN           0x100
 #define MASTER_CTRL_MTIMER_EN           0x100
 #define MASTER_CTRL_MANUAL_INT          0x200
 #define MASTER_CTRL_MANUAL_INT          0x200
 #define MASTER_CTRL_TX_ITIMER_EN	0x400
 #define MASTER_CTRL_TX_ITIMER_EN	0x400
@@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 		GPHY_CTRL_PWDOWN_HW	|\
 		GPHY_CTRL_PWDOWN_HW	|\
 		GPHY_CTRL_PHY_IDDQ)
 		GPHY_CTRL_PHY_IDDQ)
 
 
+#define GPHY_CTRL_POWER_SAVING (	\
+		GPHY_CTRL_SEL_ANA_RST	|\
+		GPHY_CTRL_HIB_EN	|\
+		GPHY_CTRL_HIB_PULSE	|\
+		GPHY_CTRL_PWDOWN_HW	|\
+		GPHY_CTRL_PHY_IDDQ)
 /* Block IDLE Status Register */
 /* Block IDLE Status Register */
 #define REG_IDLE_STATUS  		0x1410
 #define REG_IDLE_STATUS  		0x1410
 #define IDLE_STATUS_MASK		0x00FF
 #define IDLE_STATUS_MASK		0x00FF
@@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 #define SERDES_LOCK_DETECT          	0x1  /* SerDes lock detected. This signal
 #define SERDES_LOCK_DETECT          	0x1  /* SerDes lock detected. This signal
 					      * comes from Analog SerDes */
 					      * comes from Analog SerDes */
 #define SERDES_LOCK_DETECT_EN       	0x2  /* 1: Enable SerDes Lock detect function */
 #define SERDES_LOCK_DETECT_EN       	0x2  /* 1: Enable SerDes Lock detect function */
+#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
+#define SERDES_LOCK_STS_SELFB_PLL_MASK  0x3
+#define SERDES_OVCLK_18_25		0x0
+#define SERDES_OVCLK_12_18		0x1
+#define SERDES_OVCLK_0_4		0x2
+#define SERDES_OVCLK_4_12		0x3
+#define SERDES_MAC_CLK_SLOWDOWN		0x20000
+#define SERDES_PYH_CLK_SLOWDOWN		0x40000
 
 
 /* MAC Control Register  */
 /* MAC Control Register  */
 #define REG_MAC_CTRL         		0x1480
 #define REG_MAC_CTRL         		0x1480
@@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 #define REG_MAC_TX_STATUS_BIN 		0x1760
 #define REG_MAC_TX_STATUS_BIN 		0x1760
 #define REG_MAC_TX_STATUS_END 		0x17c0
 #define REG_MAC_TX_STATUS_END 		0x17c0
 
 
+#define REG_CLK_GATING_CTRL		0x1814
+#define CLK_GATING_DMAW_EN		0x0001
+#define CLK_GATING_DMAR_EN		0x0002
+#define CLK_GATING_TXQ_EN		0x0004
+#define CLK_GATING_RXQ_EN		0x0008
+#define CLK_GATING_TXMAC_EN		0x0010
+#define CLK_GATING_RXMAC_EN		0x0020
+
+#define CLK_GATING_EN_ALL	(CLK_GATING_DMAW_EN |\
+				 CLK_GATING_DMAR_EN |\
+				 CLK_GATING_TXQ_EN  |\
+				 CLK_GATING_RXQ_EN  |\
+				 CLK_GATING_TXMAC_EN|\
+				 CLK_GATING_RXMAC_EN)
+
 /* DEBUG ADDR */
 /* DEBUG ADDR */
 #define REG_DEBUG_DATA0 		0x1900
 #define REG_DEBUG_DATA0 		0x1900
 #define REG_DEBUG_DATA1 		0x1904
 #define REG_DEBUG_DATA1 		0x1904
@@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
 
 
 #define MII_PHYSID1			0x02
 #define MII_PHYSID1			0x02
 #define MII_PHYSID2			0x03
 #define MII_PHYSID2			0x03
+#define L1D_MPW_PHYID1			0xD01C  /* V7 */
+#define L1D_MPW_PHYID2			0xD01D  /* V1-V6 */
+#define L1D_MPW_PHYID3			0xD01E  /* V8 */
+
 
 
 /* Autoneg Advertisement Register */
 /* Autoneg Advertisement Register */
 #define MII_ADVERTISE			0x04
 #define MII_ADVERTISE			0x04

+ 200 - 148
drivers/net/atl1c/atl1c_main.c

@@ -21,7 +21,7 @@
 
 
 #include "atl1c.h"
 #include "atl1c.h"
 
 
-#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 #define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
 #define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
@@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 #define PCI_DEVICE_ID_ATHEROS_L2C_B	0x2060 /* AR8152 v1.1 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L2C_B	0x2060 /* AR8152 v1.1 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L2C_B2	0x2062 /* AR8152 v2.0 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L2C_B2	0x2062 /* AR8152 v2.0 Fast 10/100 */
 #define PCI_DEVICE_ID_ATHEROS_L1D	0x1073 /* AR8151 v1.0 Gigabit 1000 */
 #define PCI_DEVICE_ID_ATHEROS_L1D	0x1073 /* AR8151 v1.0 Gigabit 1000 */
-
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0	0x1083 /* AR8151 v2.0 Gigabit 1000 */
 #define L2CB_V10			0xc0
 #define L2CB_V10			0xc0
 #define L2CB_V11			0xc1
 #define L2CB_V11			0xc1
 
 
@@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
 
 
 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
 	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+static void atl1c_pcie_patch(struct atl1c_hw *hw)
+{
+	u32 data;
 
 
+	AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+	data |= PCIE_PHYMISC_FORCE_RCV_DET;
+	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+
+	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
+		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
+
+		data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
+			PCIE_PHYMISC2_SERDES_CDR_SHIFT);
+		data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
+		data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
+			PCIE_PHYMISC2_SERDES_TH_SHIFT);
+		data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+	}
+}
+
+/* FIXME: no need any more ? */
 /*
 /*
  * atl1c_init_pcie - init PCIE module
  * atl1c_init_pcie - init PCIE module
  */
  */
@@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 	data &= ~PCIE_UC_SERVRITY_FCP;
 	data &= ~PCIE_UC_SERVRITY_FCP;
 	AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
 	AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
 
 
+	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
+	data &= ~LTSSM_ID_EN_WRO;
+	AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
+
+	atl1c_pcie_patch(hw);
 	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 		atl1c_disable_l0s_l1(hw);
 		atl1c_disable_l0s_l1(hw);
 	if (flag & ATL1C_PCIE_PHY_RESET)
 	if (flag & ATL1C_PCIE_PHY_RESET)
@@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 		AT_WRITE_REG(hw, REG_GPHY_CTRL,
 		AT_WRITE_REG(hw, REG_GPHY_CTRL,
 			GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
 			GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
 
 
-	msleep(1);
+	msleep(5);
 }
 }
 
 
 /*
 /*
@@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
 {
 {
 	atomic_inc(&adapter->irq_sem);
 	atomic_inc(&adapter->irq_sem);
 	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
 	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 	AT_WRITE_FLUSH(&adapter->hw);
 	AT_WRITE_FLUSH(&adapter->hw);
 	synchronize_irq(adapter->pdev->irq);
 	synchronize_irq(adapter->pdev->irq);
 }
 }
@@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 
 
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
 		/* link down */
-		if (netif_carrier_ok(netdev)) {
-			hw->hibernate = true;
-			if (atl1c_stop_mac(hw) != 0)
-				if (netif_msg_hw(adapter))
-					dev_warn(&pdev->dev,
-						"stop mac failed\n");
-			atl1c_set_aspm(hw, false);
-		}
+		hw->hibernate = true;
+		if (atl1c_stop_mac(hw) != 0)
+			if (netif_msg_hw(adapter))
+				dev_warn(&pdev->dev, "stop mac failed\n");
+		atl1c_set_aspm(hw, false);
 		netif_carrier_off(netdev);
 		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+		atl1c_phy_reset(hw);
+		atl1c_phy_init(&adapter->hw);
 	} else {
 	} else {
 		/* Link Up */
 		/* Link Up */
 		hw->hibernate = false;
 		hw->hibernate = false;
@@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work)
 	netdev = adapter->netdev;
 	netdev = adapter->netdev;
 
 
 	if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
 	if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
+		adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
 		netif_device_detach(netdev);
 		netif_device_detach(netdev);
 		atl1c_down(adapter);
 		atl1c_down(adapter);
 		atl1c_up(adapter);
 		atl1c_up(adapter);
@@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work)
 		return;
 		return;
 	}
 	}
 
 
-	if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
+	if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
+		adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
 		atl1c_check_link_status(adapter);
 		atl1c_check_link_status(adapter);
+	}
+	return;
 }
 }
 
 
 
 
@@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
 		netdev->mtu = new_mtu;
 		netdev->mtu = new_mtu;
 		adapter->hw.max_frame_size = new_mtu;
 		adapter->hw.max_frame_size = new_mtu;
 		atl1c_set_rxbufsize(adapter, netdev);
 		atl1c_set_rxbufsize(adapter, netdev);
+		if (new_mtu > MAX_TSO_FRAME_SIZE) {
+			adapter->netdev->features &= ~NETIF_F_TSO;
+			adapter->netdev->features &= ~NETIF_F_TSO6;
+		} else {
+			adapter->netdev->features |= NETIF_F_TSO;
+			adapter->netdev->features |= NETIF_F_TSO6;
+		}
 		atl1c_down(adapter);
 		atl1c_down(adapter);
 		atl1c_up(adapter);
 		atl1c_up(adapter);
 		clear_bit(__AT_RESETTING, &adapter->flags);
 		clear_bit(__AT_RESETTING, &adapter->flags);
@@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
 	case PCI_DEVICE_ID_ATHEROS_L1D:
 	case PCI_DEVICE_ID_ATHEROS_L1D:
 		hw->nic_type = athr_l1d;
 		hw->nic_type = athr_l1d;
 		break;
 		break;
+	case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
+		hw->nic_type = athr_l1d_2;
+		break;
 	default:
 	default:
 		break;
 		break;
 	}
 	}
@@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
 	AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
 	AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 
 
-	hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ |
-			 ATL1C_INTR_MODRT_ENABLE  |
-			 ATL1C_RX_IPV6_CHKSUM	  |
+	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 			 ATL1C_TXQ_MODE_ENHANCE;
 			 ATL1C_TXQ_MODE_ENHANCE;
 	if (link_ctrl_data & LINK_CTRL_L0S_EN)
 	if (link_ctrl_data & LINK_CTRL_L0S_EN)
 		hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
 		hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
@@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
 		hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
 		hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
 	if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
 	if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
 		hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
 		hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 
 
 	if (hw->nic_type == athr_l1c ||
 	if (hw->nic_type == athr_l1c ||
-	    hw->nic_type == athr_l1d) {
-		hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
+	    hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l1d_2)
 		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
 		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
-	}
 	return 0;
 	return 0;
 }
 }
 /*
 /*
@@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
 {
 {
 	struct atl1c_hw *hw   = &adapter->hw;
 	struct atl1c_hw *hw   = &adapter->hw;
 	struct pci_dev	*pdev = adapter->pdev;
 	struct pci_dev	*pdev = adapter->pdev;
+	u32 revision;
+
 
 
 	adapter->wol = 0;
 	adapter->wol = 0;
 	adapter->link_speed = SPEED_0;
 	adapter->link_speed = SPEED_0;
@@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
 	hw->device_id = pdev->device;
 	hw->device_id = pdev->device;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
 	hw->subsystem_id = pdev->subsystem_device;
-
+	AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+	hw->revision_id = revision & 0xFF;
 	/* before link up, we assume hibernate is true */
 	/* before link up, we assume hibernate is true */
 	hw->hibernate = true;
 	hw->hibernate = true;
 	hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
 	hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
@@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
 	struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
 	struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
 	struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
 	struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
 	int i;
 	int i;
+	u32 data;
 
 
 	/* TPD */
 	/* TPD */
 	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
 	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
@@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
 			(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
 			(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
 	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
 	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
 			(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
 			(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
+	if (hw->nic_type == athr_l2c_b) {
+		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
+		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
+		AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
+		AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
+		AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
+		AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
+		AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);	/* TX watermark, to enter l1 state.*/
+		AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);		/* RXD threshold.*/
+	}
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
+			/* Power Saving for L2c_B */
+		AT_READ_REG(hw, REG_SERDES_LOCK, &data);
+		data |= SERDES_MAC_CLK_SLOWDOWN;
+		data |= SERDES_PYH_CLK_SLOWDOWN;
+		AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
+	}
 	/* Load all of base address above */
 	/* Load all of base address above */
 	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
 	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
 }
 }
@@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 	u16 tx_offload_thresh;
 	u16 tx_offload_thresh;
 	u32 txq_ctrl_data;
 	u32 txq_ctrl_data;
 	u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */
 	u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */
+	u32 max_pay_load_data;
 
 
 	extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
 	extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
 	tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
 	tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
@@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 			TXQ_NUM_TPD_BURST_SHIFT;
 			TXQ_NUM_TPD_BURST_SHIFT;
 	if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
 	if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
 		txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
 		txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
-	txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] &
+	max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
 			TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
 			TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
+		max_pay_load_data >>= 1;
+	txq_ctrl_data |= max_pay_load_data;
 
 
 	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
 	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
 }
 }
@@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
 	rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
 	rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
 			RSS_HASH_BITS_SHIFT;
 			RSS_HASH_BITS_SHIFT;
 	if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
 	if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
-		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M &
+		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
 			ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
 			ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
 
 
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
@@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
 {
 {
 	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 	struct pci_dev *pdev = adapter->pdev;
-	int ret;
+	u32 master_ctrl_data = 0;
 
 
 	AT_WRITE_REG(hw, REG_IMR, 0);
 	AT_WRITE_REG(hw, REG_IMR, 0);
 	AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
 	AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
 
 
-	ret = atl1c_stop_mac(hw);
-	if (ret)
-		return ret;
+	atl1c_stop_mac(hw);
 	/*
 	/*
 	 * Issue Soft Reset to the MAC.  This will reset the chip's
 	 * Issue Soft Reset to the MAC.  This will reset the chip's
 	 * transmit, receive, DMA.  It will not effect
 	 * transmit, receive, DMA.  It will not effect
 	 * the current PCI configuration.  The global reset bit is self-
 	 * the current PCI configuration.  The global reset bit is self-
 	 * clearing, and should clear within a microsecond.
 	 * clearing, and should clear within a microsecond.
 	 */
 	 */
-	AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
+	AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
+			& 0xFFFF));
+
 	AT_WRITE_FLUSH(hw);
 	AT_WRITE_FLUSH(hw);
 	msleep(10);
 	msleep(10);
 	/* Wait at least 10ms for All module to be Idle */
 	/* Wait at least 10ms for All module to be Idle */
@@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
 {
 {
 	u32 pm_ctrl_data;
 	u32 pm_ctrl_data;
 	u32 link_ctrl_data;
 	u32 link_ctrl_data;
+	u32 link_l1_timer = 0xF;
 
 
 	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
 	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
 
 
+	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
 	pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
 	pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
 			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
 			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
 	pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
 	pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
-			  PM_CTRL_LCKDET_TIMER_SHIFT);
-
-	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-	pm_ctrl_data |= PM_CTRL_RBER_EN;
-	pm_ctrl_data |= PM_CTRL_SDES_EN;
+			PM_CTRL_LCKDET_TIMER_SHIFT);
+	pm_ctrl_data |= AT_LCKDET_TIMER	<< PM_CTRL_LCKDET_TIMER_SHIFT;
 
 
-	if (hw->nic_type == athr_l2c_b ||
-	    hw->nic_type == athr_l1d ||
-	    hw->nic_type == athr_l2c_b2) {
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+		hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
 		link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
 		link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
 		if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
 		if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
-			if (hw->nic_type == athr_l2c_b &&
-			    hw->revision_id == L2CB_V10)
+			if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
 				link_ctrl_data |= LINK_CTRL_EXT_SYNC;
 				link_ctrl_data |= LINK_CTRL_EXT_SYNC;
 		}
 		}
 
 
 		AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
 		AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
 
 
-		pm_ctrl_data |= PM_CTRL_PCIE_RECV;
-		pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
-		pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
+		pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
+			PM_CTRL_PM_REQ_TIMER_SHIFT);
+		pm_ctrl_data |= AT_ASPM_L1_TIMER <<
+			PM_CTRL_PM_REQ_TIMER_SHIFT;
 		pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
 		pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
 		pm_ctrl_data &= ~PM_CTRL_HOTRST;
 		pm_ctrl_data &= ~PM_CTRL_HOTRST;
 		pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
 		pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
 		pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
 		pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
 	}
 	}
-
+	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
 	if (linkup) {
 	if (linkup) {
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
@@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
 		if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
 		if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
 			pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
 			pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
 
 
-		if (hw->nic_type == athr_l2c_b ||
-		    hw->nic_type == athr_l1d ||
-		    hw->nic_type == athr_l2c_b2) {
+		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+			hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
 			if (hw->nic_type == athr_l2c_b)
 			if (hw->nic_type == athr_l2c_b)
 				if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
 				if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
-					pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN;
+					pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
 			pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
 			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
 			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
-			if (hw->adapter->link_speed == SPEED_100 ||
-			    hw->adapter->link_speed == SPEED_1000) {
-				pm_ctrl_data &=
-					~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-					  PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-				if (hw->nic_type == athr_l1d)
-					pm_ctrl_data |= 0xF <<
-						PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-				else
-					pm_ctrl_data |= 7 <<
-						PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+		if (hw->adapter->link_speed == SPEED_100 ||
+				hw->adapter->link_speed == SPEED_1000) {
+				pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+					PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+				if (hw->nic_type == athr_l2c_b)
+					link_l1_timer = 7;
+				else if (hw->nic_type == athr_l2c_b2 ||
+					hw->nic_type == athr_l1d_2)
+					link_l1_timer = 4;
+				pm_ctrl_data |= link_l1_timer <<
+					PM_CTRL_L1_ENTRY_TIMER_SHIFT;
 			}
 			}
 		} else {
 		} else {
 			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
 			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
@@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
 			pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-		}
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
-		if (hw->adapter->link_speed == SPEED_10)
-			if (hw->nic_type == athr_l1d)
-				atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
-			else
-				atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
-		else if (hw->adapter->link_speed == SPEED_100)
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
-		else
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
 
+		}
 	} else {
-		pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
 		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
 		pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-
 		pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
 
 		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
@@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
 		else
 			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
 	}
-
 	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+
+	return;
 }
 
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
@@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
 		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
 
 	mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
-	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
+	    hw->nic_type == athr_l1d_2) {
 		mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
 		mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
 	}
@@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
 	struct atl1c_hw *hw = &adapter->hw;
 	u32 master_ctrl_data = 0;
 	u32 intr_modrt_data;
+	u32 data;
 
 	/* clear interrupt status */
 	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
@@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
 	 * HW will enable self to assert interrupt event to system after
 	 * waiting x-time for software to notify it accept interrupt.
 	 */
+
+	data = CLK_GATING_EN_ALL;
+	if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
+		if (hw->nic_type == athr_l2c_b)
+			data &= ~CLK_GATING_RXMAC_EN;
+	} else
+		data = 0;
+	AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
+
 	AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
 		hw->ict & INT_RETRIG_TIMER_MASK);
 
@@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
 	if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
 		master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
 
+	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
 	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 
 	if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
@@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data)
 					"atl1c hardware error (status = 0x%x)\n",
 					"atl1c hardware error (status = 0x%x)\n",
 					status & ISR_ERROR);
 			/* reset MAC */
-			hw->intr_mask &= ~ISR_ERROR;
-			AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
 			adapter->work_event |= ATL1C_WORK_EVENT_RESET;
 			schedule_work(&adapter->common_task);
-			break;
+			return IRQ_HANDLED;
 		}
 
 		if (status & ISR_OVER)
@@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter)
 	napi_disable(&adapter->napi);
 	atl1c_irq_disable(adapter);
 	atl1c_free_irq(adapter);
-	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 	/* reset MAC to disable all RX/TX */
 	atl1c_reset_mac(&adapter->hw);
 	msleep(1);
@@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw *hw = &adapter->hw;
-	u32 ctrl;
-	u32 mac_ctrl_data;
-	u32 master_ctrl_data;
+	u32 mac_ctrl_data = 0;
+	u32 master_ctrl_data = 0;
 	u32 wol_ctrl_data = 0;
-	u16 mii_bmsr_data;
-	u16 save_autoneg_advertised;
-	u16 mii_intr_status_data;
+	u16 mii_intr_status_data = 0;
 	u32 wufc = adapter->wol;
-	u32 i;
 	int retval = 0;
 
+	atl1c_disable_l0s_l1(hw);
 	if (netif_running(netdev)) {
 		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
 		atl1c_down(adapter);
 	}
 	netif_device_detach(netdev);
-	atl1c_disable_l0s_l1(hw);
 	retval = pci_save_state(pdev);
 	if (retval)
 		return retval;
+
+	if (wufc)
+		if (atl1c_phy_power_saving(hw) != 0)
+			dev_dbg(&pdev->dev, "phy power saving failed");
+
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+	master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+	mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
+	mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) <<
+			MAC_CTRL_PRMLEN_SHIFT);
+	mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
+	mac_ctrl_data &= ~MAC_CTRL_DUPLX;
+
 	if (wufc) {
-		AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
-		master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
-
-		/* get link status */
-		atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
-		atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
-		save_autoneg_advertised = hw->autoneg_advertised;
-		hw->autoneg_advertised = ADVERTISED_10baseT_Half;
-		if (atl1c_restart_autoneg(hw) != 0)
-			if (netif_msg_link(adapter))
-				dev_warn(&pdev->dev, "phy autoneg failed\n");
-		hw->phy_configured = false; /* re-init PHY when resume */
-		hw->autoneg_advertised = save_autoneg_advertised;
+		mac_ctrl_data |= MAC_CTRL_RX_EN;
+		if (adapter->link_speed == SPEED_1000 ||
+			adapter->link_speed == SPEED_0) {
+			mac_ctrl_data |= atl1c_mac_speed_1000 <<
+					MAC_CTRL_SPEED_SHIFT;
+			mac_ctrl_data |= MAC_CTRL_DUPLX;
+		} else
+			mac_ctrl_data |= atl1c_mac_speed_10_100 <<
+					MAC_CTRL_SPEED_SHIFT;
+
+		if (adapter->link_duplex == DUPLEX_FULL)
+			mac_ctrl_data |= MAC_CTRL_DUPLX;
+
 		/* turn on magic packet wol */
 		if (wufc & AT_WUFC_MAG)
-			wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
 
 		if (wufc & AT_WUFC_LNKC) {
-			for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
-				msleep(100);
-				atl1c_read_phy_reg(hw, MII_BMSR,
-					(u16 *)&mii_bmsr_data);
-				if (mii_bmsr_data & BMSR_LSTATUS)
-					break;
-			}
-			if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
-				if (netif_msg_link(adapter))
-					dev_warn(&pdev->dev,
-						"%s: Link may change"
-						"when suspend\n",
-						atl1c_driver_name);
 			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
 			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
 			/* only link up can wake up */
 			/* only link up can wake up */
 			if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
 			if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
-				if (netif_msg_link(adapter))
-					dev_err(&pdev->dev,
-						"%s: read write phy "
-						"register failed.\n",
-						atl1c_driver_name);
-				goto wol_dis;
+				dev_dbg(&pdev->dev, "%s: read write phy "
+						  "register failed.\n",
+						  atl1c_driver_name);
 			}
 		}
 		/* clear phy interrupt */
 		atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
 		/* Config MAC Ctrl register */
-		mac_ctrl_data = MAC_CTRL_RX_EN;
-		/* set to 10/100M halt duplex */
-		mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
-		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
-				 MAC_CTRL_PRMLEN_MASK) <<
-				 MAC_CTRL_PRMLEN_SHIFT);
-
 		if (adapter->vlgrp)
 			mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
 
@@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
 		if (wufc & AT_WUFC_MAG)
 			mac_ctrl_data |= MAC_CTRL_BC_EN;
 
-		if (netif_msg_hw(adapter))
-			dev_dbg(&pdev->dev,
-				"%s: suspend MAC=0x%x\n",
-				atl1c_driver_name, mac_ctrl_data);
+		dev_dbg(&pdev->dev,
+			"%s: suspend MAC=0x%x\n",
+			atl1c_driver_name, mac_ctrl_data);
 		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
 		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 
 		/* pcie patch */
-		AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
-		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
-		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		device_set_wakeup_enable(&pdev->dev, 1);
 
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto suspend_exit;
+		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+			GPHY_CTRL_EXT_RESET);
+		pci_prepare_to_sleep(pdev);
+	} else {
+		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
+		master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
+		mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
+		mac_ctrl_data |= MAC_CTRL_DUPLX;
+		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+		hw->phy_configured = false; /* re-init PHY when resume */
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
 	}
-wol_dis:
-
-	/* WOL disabled */
-	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
-
-	/* pcie patch */
-	AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
-	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
-	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
-
-	atl1c_phy_disable(hw);
-	hw->phy_configured = false; /* re-init PHY when resume */
-
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-suspend_exit:
 
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
+			ATL1C_PCIE_PHY_RESET);
 
 	atl1c_phy_reset(&adapter->hw);
 	atl1c_reset_mac(&adapter->hw);
+	atl1c_phy_init(&adapter->hw);
+
+#if 0
+	AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
+	pm_data &= ~PM_CTRLSTAT_PME_EN;
+	AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
+#endif
+
 	netif_device_attach(netdev);
 	if (netif_running(netdev))
 		atl1c_up(adapter);

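The PM_CTRL updates in the hunks above all follow the same mask-and-shift read-modify-write idiom for multi-bit register fields: clear the field with its mask, then OR in the new value at the field's shift. A minimal standalone sketch of that idiom (FIELD_MASK and FIELD_SHIFT are made-up values, not the driver's own constants):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0xFU	/* hypothetical 4-bit field */
#define FIELD_SHIFT 16

/* Clear the field with its mask, then program the new value at its shift. */
static uint32_t set_field(uint32_t reg, uint32_t val)
{
	reg &= ~(FIELD_MASK << FIELD_SHIFT);
	reg |= (val & FIELD_MASK) << FIELD_SHIFT;
	return reg;
}

int main(void)
{
	uint32_t pm = 0xFFFFFFFFu;

	pm = set_field(pm, 0x4);		/* e.g. a 4-tick entry timer */
	printf("0x%08x\n", (unsigned int)pm);	/* bits 19:16 now hold 0x4 */
	return 0;
}
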
+ 2 - 2
drivers/net/atlx/atl1.h

@@ -436,8 +436,8 @@ struct rx_free_desc {
 	__le16 buf_len;		/* Size of the receive buffer in host memory */
 	u16 coalese;		/* Update consumer index to host after the
 				 * reception of this frame */
-	/* __attribute__ ((packed)) is required */
-} __attribute__ ((packed));
+	/* __packed is required */
+} __packed;
 
 /*
  * The L1 transmit packet descriptor is comprised of four 32-bit words.

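In the kernel, __packed expands to __attribute__((packed)), so the hunk above only swaps the spelling. A small standalone illustration of what the attribute buys (demo_desc is a made-up structure, not the atl1 descriptor):

#include <stdio.h>
#include <stdint.h>

/* Without packing the compiler may pad this out to 8 bytes;
 * with the packed attribute it stays at 7. */
struct demo_desc {
	uint32_t addr;
	uint16_t len;
	uint8_t  flags;
} __attribute__((packed));

int main(void)
{
	printf("sizeof(struct demo_desc) = %zu\n", sizeof(struct demo_desc));
	return 0;
}
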
+ 70 - 74
drivers/net/b44.c

@@ -150,9 +150,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						unsigned long offset,
 						enum dma_data_direction dir)
 						enum dma_data_direction dir)
 {
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 }
 
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +159,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 					     enum dma_data_direction dir)
 {
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 }
 
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +606,10 @@ static void b44_tx(struct b44 *bp)
 
 
 		BUG_ON(skb == NULL);
 		BUG_ON(skb == NULL);
 
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 		dev_kfree_skb_irq(skb);
 	}
 	}
@@ -648,29 +646,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 	if (skb == NULL)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 		if (skb == NULL)
 			return -ENOMEM;
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-			mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 			return -ENOMEM;
 		}
 		}
@@ -745,9 +743,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(*dest_desc),
 					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 					     DMA_BIDIRECTIONAL);
 
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 }
 
 
 static int b44_rx(struct b44 *bp, int budget)
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +765,9 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		struct rx_header *rh;
 		u16 len;
 		u16 len;
 
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +799,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 			if (skb_size < 0)
 				goto drop_it;
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +952,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 		goto err_out;
 	}
 	}
 
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 		struct sk_buff *bounce_skb;
 
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					     DMA_TO_DEVICE);
 					     DMA_TO_DEVICE);
 
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 		if (!bounce_skb)
 			goto err_out;
 			goto err_out;
 
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						     len, DMA_TO_DEVICE);
 						     len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
 			goto err_out;
@@ -1068,8 +1066,8 @@ static void b44_free_rings(struct b44 *bp)
 
 
 		if (rp->skb == NULL)
 		if (rp->skb == NULL)
 			continue;
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 		rp->skb = NULL;
 	}
 	}
@@ -1080,8 +1078,8 @@ static void b44_free_rings(struct b44 *bp)
 
 
 		if (rp->skb == NULL)
 		if (rp->skb == NULL)
 			continue;
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 		rp->skb = NULL;
 	}
 	}
@@ -1103,14 +1101,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 
 	for (i = 0; i < bp->rx_pending; i++) {
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1126,23 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 			kfree(bp->rx_ring);
 		} else
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	}
 	if (bp->tx_ring) {
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 			kfree(bp->tx_ring);
 		} else
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
 	}
@@ -1175,7 +1167,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 		goto out_err;
 
 
 	size = DMA_TABLE_BYTES;
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1180,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 		if (!rx_ring)
 			goto out_err;
 			goto out_err;
 
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			kfree(rx_ring);
 			goto out_err;
 			goto out_err;
@@ -1202,7 +1195,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 	}
 
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1208,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 		if (!tx_ring)
 			goto out_err;
 			goto out_err;
 
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-			                    DMA_TABLE_BYTES,
-			                    DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			kfree(tx_ring);
 			goto out_err;
 			goto out_err;
@@ -2176,12 +2170,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 		goto err_out_free_dev;
 	}
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 		goto err_out_powerdown;
 	}
 	}
+
 	err = b44_get_invariants(bp);
 	err = b44_get_invariants(bp);
 	if (err) {
 	if (err) {
 		dev_err(sdev->dev,
 		dev_err(sdev->dev,

+ 6 - 7
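The b44 hunks above move from the ssb_dma_* wrappers to the generic DMA API on sdev->dma_dev. A minimal sketch of the streaming-DMA lifecycle those calls implement (kernel context assumed; demo_dma_cycle, dev, buf and len are placeholders, not b44 names):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer for device writes, sync around CPU access, unmap when done. */
static int demo_dma_cycle(struct device *dev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... hardware fills the buffer ... */

	dma_sync_single_for_cpu(dev, mapping, len, DMA_FROM_DEVICE);
	/* the CPU may now look at buf */
	dma_sync_single_for_device(dev, mapping, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, mapping, len, DMA_FROM_DEVICE);
	return 0;
}
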
drivers/net/benet/be_cmds.c

@@ -186,7 +186,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
 
 
 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 {
 {
-	int cnt = 0, wait = 5;
+	int msecs = 0;
 	u32 ready;
 	u32 ready;
 
 
 	do {
 	do {
@@ -201,15 +201,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 		if (ready)
 		if (ready)
 			break;
 			break;
 
 
-		if (cnt > 4000000) {
+		if (msecs > 4000) {
 			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
 			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
 			return -1;
 			return -1;
 		}
 		}
 
 
-		if (cnt > 50)
-			wait = 200;
-		cnt += wait;
-		udelay(wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(msecs_to_jiffies(1));
+		msecs++;
 	} while (true);
 	} while (true);
 
 
 	return 0;
 	return 0;
@@ -1593,7 +1592,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
 			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
 			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
-	req->hdr.timeout = 4;
+	req->hdr.timeout = cpu_to_le32(4);
 
 
 	req->pattern = cpu_to_le64(pattern);
 	req->pattern = cpu_to_le64(pattern);
 	req->src_port = cpu_to_le32(port_num);
 	req->src_port = cpu_to_le32(port_num);

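be_mbox_db_ready_wait() above now sleeps roughly 1 ms per iteration and counts milliseconds toward a 4000 ms limit instead of busy-waiting with udelay(). The same bounded sleep-and-poll shape as a standalone sketch (is_ready() is a stand-in for reading the mailbox ready bit):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool is_ready(void)
{
	static int calls;
	return ++calls > 3;		/* pretend the bit sets after ~3 ms */
}

/* Poll once per millisecond, giving up after timeout_ms milliseconds. */
static int wait_ready(int timeout_ms)
{
	struct timespec one_ms = { 0, 1000 * 1000 };
	int msecs = 0;

	while (!is_ready()) {
		if (msecs > timeout_ms)
			return -1;	/* timed out */
		nanosleep(&one_ms, NULL);
		msecs++;
	}
	return 0;
}

int main(void)
{
	printf("wait_ready: %d\n", wait_ready(4000));
	return 0;
}
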
+ 49 - 46
drivers/net/benet/be_main.c

@@ -1735,6 +1735,44 @@ done:
 	adapter->isr_registered = false;
 	adapter->isr_registered = false;
 }
 }
 
 
+static int be_close(struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_eq_obj *rx_eq = &adapter->rx_eq;
+	struct be_eq_obj *tx_eq = &adapter->tx_eq;
+	int vec;
+
+	cancel_delayed_work_sync(&adapter->work);
+
+	be_async_mcc_disable(adapter);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	adapter->link_up = false;
+
+	be_intr_set(adapter, false);
+
+	if (adapter->msix_enabled) {
+		vec = be_msix_vec_get(adapter, tx_eq->q.id);
+		synchronize_irq(vec);
+		vec = be_msix_vec_get(adapter, rx_eq->q.id);
+		synchronize_irq(vec);
+	} else {
+		synchronize_irq(netdev->irq);
+	}
+	be_irq_unregister(adapter);
+
+	napi_disable(&rx_eq->napi);
+	napi_disable(&tx_eq->napi);
+
+	/* Wait for all pending tx completions to arrive so that
+	 * all tx skbs are freed.
+	 */
+	be_tx_compl_clean(adapter);
+
+	return 0;
+}
+
 static int be_open(struct net_device *netdev)
 static int be_open(struct net_device *netdev)
 {
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_adapter *adapter = netdev_priv(netdev);
@@ -1765,27 +1803,29 @@ static int be_open(struct net_device *netdev)
 	/* Now that interrupts are on we can process async mcc */
 	/* Now that interrupts are on we can process async mcc */
 	be_async_mcc_enable(adapter);
 	be_async_mcc_enable(adapter);
 
 
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 			&link_speed);
 			&link_speed);
 	if (status)
 	if (status)
-		goto ret_sts;
+		goto err;
 	be_link_status_update(adapter, link_up);
 	be_link_status_update(adapter, link_up);
 
 
-	if (be_physfn(adapter))
+	if (be_physfn(adapter)) {
 		status = be_vid_config(adapter);
 		status = be_vid_config(adapter);
-	if (status)
-		goto ret_sts;
+		if (status)
+			goto err;
 
 
-	if (be_physfn(adapter)) {
 		status = be_cmd_set_flow_control(adapter,
 		status = be_cmd_set_flow_control(adapter,
 				adapter->tx_fc, adapter->rx_fc);
 				adapter->tx_fc, adapter->rx_fc);
 		if (status)
 		if (status)
-			goto ret_sts;
+			goto err;
 	}
 	}
 
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-ret_sts:
-	return status;
+	return 0;
+err:
+	be_close(adapter->netdev);
+	return -EIO;
 }
 }
 
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
@@ -1913,43 +1953,6 @@ static int be_clear(struct be_adapter *adapter)
 	return 0;
 	return 0;
 }
 }
 
 
-static int be_close(struct net_device *netdev)
-{
-	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_eq_obj *rx_eq = &adapter->rx_eq;
-	struct be_eq_obj *tx_eq = &adapter->tx_eq;
-	int vec;
-
-	cancel_delayed_work_sync(&adapter->work);
-
-	be_async_mcc_disable(adapter);
-
-	netif_stop_queue(netdev);
-	netif_carrier_off(netdev);
-	adapter->link_up = false;
-
-	be_intr_set(adapter, false);
-
-	if (adapter->msix_enabled) {
-		vec = be_msix_vec_get(adapter, tx_eq->q.id);
-		synchronize_irq(vec);
-		vec = be_msix_vec_get(adapter, rx_eq->q.id);
-		synchronize_irq(vec);
-	} else {
-		synchronize_irq(netdev->irq);
-	}
-	be_irq_unregister(adapter);
-
-	napi_disable(&rx_eq->napi);
-	napi_disable(&tx_eq->napi);
-
-	/* Wait for all pending tx completions to arrive so that
-	 * all tx skbs are freed.
-	 */
-	be_tx_compl_clean(adapter);
-
-	return 0;
-}
 
 
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
 char flash_cookie[2][16] =	{"*** SE FLAS",
 char flash_cookie[2][16] =	{"*** SE FLAS",

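be_close() above is moved ahead of be_open() so the new err label in be_open() can unwind a partially brought-up interface through the normal close path. A compact sketch of that unwind-by-close pattern (demo_open, demo_close and the failing query_link step are hypothetical):

#include <stdio.h>

static int setup_irqs(void) { return 0; }
static int query_link(void) { return -1; }	/* pretend this step fails */
static void demo_close(void) { puts("close: undo everything"); }

/* Any failure after resources exist funnels through one error label
 * that reuses the normal teardown routine. */
static int demo_open(void)
{
	if (setup_irqs())
		return -1;
	if (query_link())
		goto err;
	return 0;
err:
	demo_close();
	return -1;
}

int main(void)
{
	printf("open: %d\n", demo_open());
	return 0;
}
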
+ 41 - 16
drivers/net/bnx2.c

@@ -247,6 +247,7 @@ static const struct flash_spec flash_5709 = {
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 
 static void bnx2_init_napi(struct bnx2 *bp);
 static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);
 
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 {
@@ -1445,7 +1446,8 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
 static void
 static void
 bnx2_enable_forced_2g5(struct bnx2 *bp)
 bnx2_enable_forced_2g5(struct bnx2 *bp)
 {
 {
-	u32 bmcr;
+	u32 uninitialized_var(bmcr);
+	int err;
 
 
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
 		return;
@@ -1455,22 +1457,28 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
-		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
-		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
-		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
-		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+			val |= MII_BNX2_SD_MISC1_FORCE |
+				MII_BNX2_SD_MISC1_FORCE_2_5G;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
 
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
 
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
-		bmcr |= BCM5708S_BMCR_FORCE_2500;
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr |= BCM5708S_BMCR_FORCE_2500;
 	} else {
 	} else {
 		return;
 		return;
 	}
 	}
 
 
+	if (err)
+		return;
+
 	if (bp->autoneg & AUTONEG_SPEED) {
 	if (bp->autoneg & AUTONEG_SPEED) {
 		bmcr &= ~BMCR_ANENABLE;
 		bmcr &= ~BMCR_ANENABLE;
 		if (bp->req_duplex == DUPLEX_FULL)
 		if (bp->req_duplex == DUPLEX_FULL)
@@ -1482,7 +1490,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 static void
 static void
 bnx2_disable_forced_2g5(struct bnx2 *bp)
 bnx2_disable_forced_2g5(struct bnx2 *bp)
 {
 {
-	u32 bmcr;
+	u32 uninitialized_var(bmcr);
+	int err;
 
 
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
 		return;
@@ -1492,21 +1501,26 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
-		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
-		val &= ~MII_BNX2_SD_MISC1_FORCE;
-		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
 
 
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
 
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
-		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
 	} else {
 	} else {
 		return;
 		return;
 	}
 	}
 
 
+	if (err)
+		return;
+
 	if (bp->autoneg & AUTONEG_SPEED)
 	if (bp->autoneg & AUTONEG_SPEED)
 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
@@ -6270,6 +6284,7 @@ open_err:
 	bnx2_free_skbs(bp);
 	bnx2_free_skbs(bp);
 	bnx2_free_irq(bp);
 	bnx2_free_irq(bp);
 	bnx2_free_mem(bp);
 	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
 	return rc;
 	return rc;
 }
 }
 
 
@@ -6537,6 +6552,7 @@ bnx2_close(struct net_device *dev)
 	bnx2_free_irq(bp);
 	bnx2_free_irq(bp);
 	bnx2_free_skbs(bp);
 	bnx2_free_skbs(bp);
 	bnx2_free_mem(bp);
 	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
 	bp->link_up = 0;
 	bp->link_up = 0;
 	netif_carrier_off(bp->dev);
 	netif_carrier_off(bp->dev);
 	bnx2_set_power_state(bp, PCI_D3hot);
 	bnx2_set_power_state(bp, PCI_D3hot);
@@ -8227,7 +8243,16 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
 	return str;
 	return str;
 }
 }
 
 
-static void __devinit
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
 bnx2_init_napi(struct bnx2 *bp)
 bnx2_init_napi(struct bnx2 *bp)
 {
 {
 	int i;
 	int i;

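The bnx2 changes above only use bmcr after the corresponding bnx2_read_phy() has returned success, instead of modifying a value that may never have been read. The same check-before-use shape in a standalone sketch (read_reg() is a stand-in for the PHY read):

#include <stdio.h>

/* Stand-in for a register read that can fail: 0 on success, negative on error. */
static int read_reg(unsigned int *val)
{
	*val = 0x1140;
	return 0;
}

int main(void)
{
	unsigned int bmcr;
	int err = read_reg(&bmcr);

	if (err)
		return 1;		/* bail out rather than use a stale value */

	bmcr |= 1u << 9;		/* only modify the value after the read succeeded */
	printf("0x%04x\n", bmcr);
	return 0;
}
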
+ 5 - 3
drivers/net/bnx2x_link.c

@@ -4266,14 +4266,16 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
 					       MDIO_PMA_REG_10G_CTRL2, 0x0008);
 					       MDIO_PMA_REG_10G_CTRL2, 0x0008);
 			}
 			}
 
 
-			/* Set 2-wire transfer rate to 400Khz since 100Khz
-			is not operational */
+			/* Set 2-wire transfer rate of SFP+ module EEPROM
+			 * to 100Khz since some DACs(direct attached cables) do
+			 * not work at 400Khz.
+			 */
 			bnx2x_cl45_write(bp, params->port,
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       ext_phy_addr,
 				       MDIO_PMA_DEVAD,
 				       MDIO_PMA_DEVAD,
 				       MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
 				       MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-				       0xa101);
+				       0xa001);
 
 
 			/* Set TX PreEmphasis if needed */
 			/* Set TX PreEmphasis if needed */
 			if ((params->feature_config_flags &
 			if ((params->feature_config_flags &

+ 13 - 20
drivers/net/bonding/bond_alb.c

@@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond)
 	_unlock_tx_hashtbl(bond);
 	_unlock_tx_hashtbl(bond);
 }
 }
 
 
+static long long compute_gap(struct slave *slave)
+{
+	return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
+	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+}
+
 /* Caller must hold bond lock for read */
 /* Caller must hold bond lock for read */
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
 {
 	struct slave *slave, *least_loaded;
 	struct slave *slave, *least_loaded;
-	s64 max_gap;
-	int i, found = 0;
-
-	/* Find the first enabled slave */
-	bond_for_each_slave(bond, slave, i) {
-		if (SLAVE_IS_OK(slave)) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		return NULL;
-	}
+	long long max_gap;
+	int i;
 
 
-	least_loaded = slave;
-	max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */
-			(s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+	least_loaded = NULL;
+	max_gap = LLONG_MIN;
 
 
 	/* Find the slave with the largest gap */
 	/* Find the slave with the largest gap */
-	bond_for_each_slave_from(bond, slave, i, least_loaded) {
+	bond_for_each_slave(bond, slave, i) {
 		if (SLAVE_IS_OK(slave)) {
 		if (SLAVE_IS_OK(slave)) {
-			s64 gap = (s64)(slave->speed << 20) -
-					(s64)(SLAVE_TLB_INFO(slave).load << 3);
+			long long gap = compute_gap(slave);
+
 			if (max_gap < gap) {
 			if (max_gap < gap) {
 				least_loaded = slave;
 				least_loaded = slave;
 				max_gap = gap;
 				max_gap = gap;

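tlb_get_least_loaded_slave() above now starts max_gap at LLONG_MIN and keeps the slave with the largest speed-minus-load gap, skipping slaves that are not usable. The same selection loop in miniature, with made-up numbers:

#include <limits.h>
#include <stdio.h>

struct fake_slave {
	long long speed_bits;	/* link speed, bits/s */
	long long load_bits;	/* queued load, bits */
	int ok;			/* usable for transmit? */
};

int main(void)
{
	struct fake_slave slaves[] = {
		{ 1000000000LL, 950000000LL, 1 },
		{  100000000LL,  10000000LL, 1 },
		{ 1000000000LL, 100000000LL, 0 },	/* down: must be skipped */
	};
	long long max_gap = LLONG_MIN;
	int best = -1;

	for (int i = 0; i < 3; i++) {
		long long gap = slaves[i].speed_bits - slaves[i].load_bits;

		if (slaves[i].ok && gap > max_gap) {
			max_gap = gap;
			best = i;
		}
	}
	printf("least loaded slave: %d\n", best);	/* prints 1 */
	return 0;
}
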
+ 114 - 16
drivers/net/bonding/bond_main.c

@@ -90,6 +90,7 @@
 #define BOND_LINK_ARP_INTERV	0
 #define BOND_LINK_ARP_INTERV	0
 
 
 static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
 static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
+static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
 static int num_grat_arp = 1;
 static int num_grat_arp = 1;
 static int num_unsol_na = 1;
 static int num_unsol_na = 1;
 static int miimon	= BOND_LINK_MON_INTERV;
 static int miimon	= BOND_LINK_MON_INTERV;
@@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
 static char *arp_validate;
 static char *fail_over_mac;
 static char *fail_over_mac;
+static int all_slaves_active = 0;
 static struct bond_params bonding_defaults;
 static struct bond_params bonding_defaults;
 
 
 module_param(max_bonds, int, 0);
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
+module_param(tx_queues, int, 0);
+MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 module_param(num_grat_arp, int, 0644);
 module_param(num_grat_arp, int, 0644);
 MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
 MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
 module_param(num_unsol_na, int, 0644);
 module_param(num_unsol_na, int, 0644);
@@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0);
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
 module_param(fail_over_mac, charp, 0);
 module_param(fail_over_mac, charp, 0);
 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC.  none (default), active or follow");
 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC.  none (default), active or follow");
+module_param(all_slaves_active, int, 0);
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
+				     "by setting active flag for all slaves.  "
+				     "0 for never (default), 1 for always.");
 
 
 /*----------------------------- Global variables ----------------------------*/
 /*----------------------------- Global variables ----------------------------*/
 
 
@@ -1522,16 +1530,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 		}
 	}
 	}
 
 
+	/* If this is the first slave, then we need to set the master's hardware
+	 * address to be the same as the slave's. */
+	if (bond->slave_cnt == 0)
+		memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
+		       slave_dev->addr_len);
+
+
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 	if (!new_slave) {
 	if (!new_slave) {
 		res = -ENOMEM;
 		res = -ENOMEM;
 		goto err_undo_flags;
 		goto err_undo_flags;
 	}
 	}
 
 
-	/* save slave's original flags before calling
-	 * netdev_set_master and dev_open
+	/*
+	 * Set the new_slave's queue_id to be zero.  Queue ID mapping
+	 * is set via sysfs or module option if desired.
 	 */
 	 */
-	new_slave->original_flags = slave_dev->flags;
+	new_slave->queue_id = 0;
+
+	/* Save slave's original mtu and then set it to match the bond */
+	new_slave->original_mtu = slave_dev->mtu;
+	res = dev_set_mtu(slave_dev, bond->dev->mtu);
+	if (res) {
+		pr_debug("Error %d calling dev_set_mtu\n", res);
+		goto err_free;
+	}
 
 
 	/*
 	/*
 	 * Save slave's original ("permanent") mac address for modes
 	 * Save slave's original ("permanent") mac address for modes
@@ -1550,7 +1574,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		res = dev_set_mac_address(slave_dev, &addr);
 		res = dev_set_mac_address(slave_dev, &addr);
 		if (res) {
 		if (res) {
 			pr_debug("Error %d calling set_mac_address\n", res);
 			pr_debug("Error %d calling set_mac_address\n", res);
-			goto err_free;
+			goto err_restore_mtu;
 		}
 		}
 	}
 	}
 
 
@@ -1785,6 +1809,9 @@ err_restore_mac:
 		dev_set_mac_address(slave_dev, &addr);
 		dev_set_mac_address(slave_dev, &addr);
 	}
 	}
 
 
+err_restore_mtu:
+	dev_set_mtu(slave_dev, new_slave->original_mtu);
+
 err_free:
 err_free:
 	kfree(new_slave);
 	kfree(new_slave);
 
 
@@ -1969,6 +1996,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		dev_set_mac_address(slave_dev, &addr);
 		dev_set_mac_address(slave_dev, &addr);
 	}
 	}
 
 
+	dev_set_mtu(slave_dev, slave->original_mtu);
+
 	slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
 	slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
 				   IFF_SLAVE_INACTIVE | IFF_BONDING |
 				   IFF_SLAVE_INACTIVE | IFF_BONDING |
 				   IFF_SLAVE_NEEDARP);
 				   IFF_SLAVE_NEEDARP);
@@ -2555,7 +2584,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		/*
 		/*
 		 * This target is not on a VLAN
 		 * This target is not on a VLAN
 		 */
 		 */
-		if (rt->u.dst.dev == bond->dev) {
+		if (rt->dst.dev == bond->dev) {
 			ip_rt_put(rt);
 			ip_rt_put(rt);
 			pr_debug("basa: rtdev == bond->dev: arp_send\n");
 			pr_debug("basa: rtdev == bond->dev: arp_send\n");
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
@@ -2566,7 +2595,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		vlan_id = 0;
 		vlan_id = 0;
 		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 			vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 			vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
-			if (vlan_dev == rt->u.dst.dev) {
+			if (vlan_dev == rt->dst.dev) {
 				vlan_id = vlan->vlan_id;
 				vlan_id = vlan->vlan_id;
 				pr_debug("basa: vlan match on %s %d\n",
 				pr_debug("basa: vlan match on %s %d\n",
 				       vlan_dev->name, vlan_id);
 				       vlan_dev->name, vlan_id);
@@ -2584,7 +2613,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 		if (net_ratelimit()) {
 		if (net_ratelimit()) {
 			pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 			pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 				   bond->dev->name, &fl.fl4_dst,
 				   bond->dev->name, &fl.fl4_dst,
-				   rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+				   rt->dst.dev ? rt->dst.dev->name : "NULL");
 		}
 		}
 		ip_rt_put(rt);
 		ip_rt_put(rt);
 	}
 	}
@@ -3265,6 +3294,7 @@ static void bond_info_show_slave(struct seq_file *seq,
 		else
 		else
 			seq_puts(seq, "Aggregator ID: N/A\n");
 			seq_puts(seq, "Aggregator ID: N/A\n");
 	}
 	}
+	seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
 }
 }
 
 
 static int bond_info_seq_show(struct seq_file *seq, void *v)
 static int bond_info_seq_show(struct seq_file *seq, void *v)
@@ -3774,20 +3804,21 @@ static int bond_close(struct net_device *bond_dev)
 	return 0;
 	return 0;
 }
 }
 
 
-static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
+static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev)
 {
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct net_device_stats *stats = &bond_dev->stats;
-	struct net_device_stats local_stats;
+	struct rtnl_link_stats64 *stats = &bond_dev->stats64;
+	struct rtnl_link_stats64 local_stats;
 	struct slave *slave;
 	struct slave *slave;
 	int i;
 	int i;
 
 
-	memset(&local_stats, 0, sizeof(struct net_device_stats));
+	memset(&local_stats, 0, sizeof(local_stats));
 
 
 	read_lock_bh(&bond->lock);
 	read_lock_bh(&bond->lock);
 
 
 	bond_for_each_slave(bond, slave, i) {
 	bond_for_each_slave(bond, slave, i) {
-		const struct net_device_stats *sstats = dev_get_stats(slave->dev);
+		const struct rtnl_link_stats64 *sstats =
+			dev_get_stats(slave->dev);
 
 
 		local_stats.rx_packets += sstats->rx_packets;
 		local_stats.rx_packets += sstats->rx_packets;
 		local_stats.rx_bytes += sstats->rx_bytes;
 		local_stats.rx_bytes += sstats->rx_bytes;
@@ -4401,9 +4432,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
 	}
 	}
 }
 }
 
 
+/*
+ * Lookup the slave that corresponds to a qid
+ */
+static inline int bond_slave_override(struct bonding *bond,
+				      struct sk_buff *skb)
+{
+	int i, res = 1;
+	struct slave *slave = NULL;
+	struct slave *check_slave;
+
+	read_lock(&bond->lock);
+
+	if (!BOND_IS_OK(bond) || !skb->queue_mapping)
+		goto out;
+
+	/* Find out if any slaves have the same mapping as this skb. */
+	bond_for_each_slave(bond, check_slave, i) {
+		if (check_slave->queue_id == skb->queue_mapping) {
+			slave = check_slave;
+			break;
+		}
+	}
+
+	/* If the slave isn't UP, use default transmit policy. */
+	if (slave && slave->queue_id && IS_UP(slave->dev) &&
+	    (slave->link == BOND_LINK_UP)) {
+		res = bond_dev_queue_xmit(bond, skb, slave->dev);
+	}
+
+out:
+	read_unlock(&bond->lock);
+	return res;
+}
+
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/*
+	 * This helper function exists to help dev_pick_tx get the correct
+	 * destination queue.  Using a helper function skips the a call to
+	 * skb_tx_hash and will put the skbs in the queue we expect on their
+	 * way down to the bonding driver.
+	 */
+	return skb->queue_mapping;
+}
+
 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 {
-	const struct bonding *bond = netdev_priv(dev);
+	struct bonding *bond = netdev_priv(dev);
+
+	if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
+		if (!bond_slave_override(bond, skb))
+			return NETDEV_TX_OK;
+	}
 
 
 	switch (bond->params.mode) {
 	switch (bond->params.mode) {
 	case BOND_MODE_ROUNDROBIN:
 	case BOND_MODE_ROUNDROBIN:
@@ -4488,7 +4569,8 @@ static const struct net_device_ops bond_netdev_ops = {
 	.ndo_open		= bond_open,
 	.ndo_open		= bond_open,
 	.ndo_stop		= bond_close,
 	.ndo_stop		= bond_close,
 	.ndo_start_xmit		= bond_start_xmit,
 	.ndo_start_xmit		= bond_start_xmit,
-	.ndo_get_stats		= bond_get_stats,
+	.ndo_select_queue	= bond_select_queue,
+	.ndo_get_stats64	= bond_get_stats,
 	.ndo_do_ioctl		= bond_do_ioctl,
 	.ndo_do_ioctl		= bond_do_ioctl,
 	.ndo_set_multicast_list	= bond_set_multicast_list,
 	.ndo_set_multicast_list	= bond_set_multicast_list,
 	.ndo_change_mtu		= bond_change_mtu,
 	.ndo_change_mtu		= bond_change_mtu,
@@ -4756,6 +4838,20 @@ static int bond_check_params(struct bond_params *params)
 		}
 		}
 	}
 	}
 
 
+	if (tx_queues < 1 || tx_queues > 255) {
+		pr_warning("Warning: tx_queues (%d) should be between "
+			   "1 and 255, resetting to %d\n",
+			   tx_queues, BOND_DEFAULT_TX_QUEUES);
+		tx_queues = BOND_DEFAULT_TX_QUEUES;
+	}
+
+	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
+		pr_warning("Warning: all_slaves_active module parameter (%d), "
+			   "not of valid value (0/1), so it was set to "
+			   "0\n", all_slaves_active);
+		all_slaves_active = 0;
+	}
+
 	/* reset values for TLB/ALB */
 	/* reset values for TLB/ALB */
 	if ((bond_mode == BOND_MODE_TLB) ||
 	if ((bond_mode == BOND_MODE_TLB) ||
 	    (bond_mode == BOND_MODE_ALB)) {
 	    (bond_mode == BOND_MODE_ALB)) {
@@ -4926,6 +5022,8 @@ static int bond_check_params(struct bond_params *params)
 	params->primary[0] = 0;
 	params->primary[0] = 0;
 	params->primary_reselect = primary_reselect_value;
 	params->primary_reselect = primary_reselect_value;
 	params->fail_over_mac = fail_over_mac_value;
 	params->fail_over_mac = fail_over_mac_value;
+	params->tx_queues = tx_queues;
+	params->all_slaves_active = all_slaves_active;
 
 
 	if (primary) {
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
 		strncpy(params->primary, primary, IFNAMSIZ);
@@ -5012,8 +5110,8 @@ int bond_create(struct net *net, const char *name)
 
 
 	rtnl_lock();
 	rtnl_lock();
 
 
-	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
-				bond_setup);
+	bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
+				bond_setup, tx_queues);
 	if (!bond_dev) {
 	if (!bond_dev) {
 		pr_err("%s: eek! can't alloc netdev!\n", name);
 		pr_err("%s: eek! can't alloc netdev!\n", name);
 		rtnl_unlock();
 		rtnl_unlock();

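The new bond_select_queue() above simply propagates skb->queue_mapping so dev_pick_tx() honours the queue the sender stamped on the skb rather than hashing it. A minimal sketch of an ndo_select_queue hook of that shape (demo_* names are hypothetical; the two-argument signature shown is the one this kernel tree uses):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hand dev_pick_tx the queue the sender already stamped on the skb,
 * instead of letting skb_tx_hash() pick one. */
static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_select_queue	= demo_select_queue,
	/* remaining callbacks elided */
};

The queue_id sysfs file added in bond_sysfs.c below is what maps a slave device to one of these queue numbers.
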
+ 193 - 86
drivers/net/bonding/bond_sysfs.c

@@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d,
 /*
 /*
  * Set the slaves in the current bond.  The bond interface must be
  * Set the slaves in the current bond.  The bond interface must be
  * up for this to succeed.
  * up for this to succeed.
- * This function is largely the same flow as bonding_update_bonds().
+ * This is supposed to be only thin wrapper for bond_enslave and bond_release.
+ * All hard work should be done there.
  */
  */
 static ssize_t bonding_store_slaves(struct device *d,
 static ssize_t bonding_store_slaves(struct device *d,
 				    struct device_attribute *attr,
 				    struct device_attribute *attr,
@@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d,
 {
 {
 	char command[IFNAMSIZ + 1] = { 0, };
 	char command[IFNAMSIZ + 1] = { 0, };
 	char *ifname;
 	char *ifname;
-	int i, res, found, ret = count;
-	u32 original_mtu;
-	struct slave *slave;
-	struct net_device *dev = NULL;
+	int res, ret = count;
+	struct net_device *dev;
 	struct bonding *bond = to_bond(d);
 	struct bonding *bond = to_bond(d);
 
 
 	/* Quick sanity check -- is the bond interface up? */
 	/* Quick sanity check -- is the bond interface up? */
@@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d,
 			   bond->dev->name);
 			   bond->dev->name);
 	}
 	}
 
 
-	/* Note:  We can't hold bond->lock here, as bond_create grabs it. */
-
 	if (!rtnl_trylock())
 	if (!rtnl_trylock())
 		return restart_syscall();
 		return restart_syscall();
 
 
@@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d,
 	    !dev_valid_name(ifname))
 	    !dev_valid_name(ifname))
 		goto err_no_cmd;
 		goto err_no_cmd;
 
 
-	if (command[0] == '+') {
-
-		/* Got a slave name in ifname.  Is it already in the list? */
-		found = 0;
-
-		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
-		if (!dev) {
-			pr_info("%s: Interface %s does not exist!\n",
-				bond->dev->name, ifname);
-			ret = -ENODEV;
-			goto out;
-		}
-
-		if (dev->flags & IFF_UP) {
-			pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
-			       bond->dev->name, dev->name);
-			ret = -EPERM;
-			goto out;
-		}
-
-		read_lock(&bond->lock);
-		bond_for_each_slave(bond, slave, i)
-			if (slave->dev == dev) {
-				pr_err("%s: Interface %s is already enslaved!\n",
-				       bond->dev->name, ifname);
-				ret = -EPERM;
-				read_unlock(&bond->lock);
-				goto out;
-			}
-		read_unlock(&bond->lock);
-
-		pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
-
-		/* If this is the first slave, then we need to set
-		   the master's hardware address to be the same as the
-		   slave's. */
-		if (is_zero_ether_addr(bond->dev->dev_addr))
-			memcpy(bond->dev->dev_addr, dev->dev_addr,
-			       dev->addr_len);
-
-		/* Set the slave's MTU to match the bond */
-		original_mtu = dev->mtu;
-		res = dev_set_mtu(dev, bond->dev->mtu);
-		if (res) {
-			ret = res;
-			goto out;
-		}
+	dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+	if (!dev) {
+		pr_info("%s: Interface %s does not exist!\n",
+			bond->dev->name, ifname);
+		ret = -ENODEV;
+		goto out;
+	}
 
 
+	switch (command[0]) {
+	case '+':
+		pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
 		res = bond_enslave(bond->dev, dev);
 		res = bond_enslave(bond->dev, dev);
-		bond_for_each_slave(bond, slave, i)
-			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
-				slave->original_mtu = original_mtu;
-		if (res)
-			ret = res;
+		break;

-		goto out;
-	}
+	case '-':
+		pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+		res = bond_release(bond->dev, dev);
+		break;

-	if (command[0] == '-') {
-		dev = NULL;
-		original_mtu = 0;
-		bond_for_each_slave(bond, slave, i)
-			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-				dev = slave->dev;
-				original_mtu = slave->original_mtu;
-				break;
-			}
-		if (dev) {
-			pr_info("%s: Removing slave %s\n",
-				bond->dev->name, dev->name);
-				res = bond_release(bond->dev, dev);
-			if (res) {
-				ret = res;
-				goto out;
-			}
-			/* set the slave MTU to the default */
-			dev_set_mtu(dev, original_mtu);
-		} else {
-			pr_err("unable to remove non-existent slave %s for bond %s.\n",
-			       ifname, bond->dev->name);
-			ret = -ENODEV;
-		}
-		goto out;
+	default:
+		goto err_no_cmd;
 	}

+	if (res)
+		ret = res;
+	goto out;
+
 err_no_cmd:
 	pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
 	       bond->dev->name);
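The rewritten handler above turns the slaves file into a thin +ifname/-ifname front end for bond_enslave() and bond_release(). A minimal user-space sketch of driving it; the bond and slave names are illustrative only:

/* Illustrative user-space helper; assumes a bond named bond0 already exists. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int bond_slave_cmd(const char *bond, const char *cmd)
{
	char path[128];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/class/net/%s/bonding/slaves", bond);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* "+eth0" enslaves, "-eth0" releases; anything else hits err_no_cmd. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	return bond_slave_cmd("bond0", "+eth0");
}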
@@ -1472,7 +1411,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
 }
 static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);

+/*
+ * Show the queue_ids of the slaves in the current bond.
+ */
+static ssize_t bonding_show_queue_id(struct device *d,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct slave *slave;
+	int i, res = 0;
+	struct bonding *bond = to_bond(d);
+
+	if (!rtnl_trylock())
+		return restart_syscall();

+	read_lock(&bond->lock);
+	bond_for_each_slave(bond, slave, i) {
+		if (res > (PAGE_SIZE - 6)) {
+			/* not enough space for another interface name */
+			if ((PAGE_SIZE - res) > 10)
+				res = PAGE_SIZE - 10;
+			res += sprintf(buf + res, "++more++ ");
+			break;
+		}
+		res += sprintf(buf + res, "%s:%d ",
+			       slave->dev->name, slave->queue_id);
+	}
+	read_unlock(&bond->lock);
+	if (res)
+		buf[res-1] = '\n'; /* eat the leftover space */
+	rtnl_unlock();
+	return res;
+}
+
+/*
+ * Set the queue_ids of the  slaves in the current bond.  The bond
+ * interface must be enslaved for this to work.
+ */
+static ssize_t bonding_store_queue_id(struct device *d,
+				      struct device_attribute *attr,
+				      const char *buffer, size_t count)
+{
+	struct slave *slave, *update_slave;
+	struct bonding *bond = to_bond(d);
+	u16 qid;
+	int i, ret = count;
+	char *delim;
+	struct net_device *sdev = NULL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	/* delim will point to queue id if successful */
+	delim = strchr(buffer, ':');
+	if (!delim)
+		goto err_no_cmd;
+
+	/*
+	 * Terminate string that points to device name and bump it
+	 * up one, so we can read the queue id there.
+	 */
+	*delim = '\0';
+	if (sscanf(++delim, "%hd\n", &qid) != 1)
+		goto err_no_cmd;
+
+	/* Check buffer length, valid ifname and queue id */
+	if (strlen(buffer) > IFNAMSIZ ||
+	    !dev_valid_name(buffer) ||
+	    qid > bond->params.tx_queues)
+		goto err_no_cmd;
+
+	/* Get the pointer to that interface if it exists */
+	sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
+	if (!sdev)
+		goto err_no_cmd;
+
+	read_lock(&bond->lock);
+
+	/* Search for the slave and check for duplicate qids */
+	update_slave = NULL;
+	bond_for_each_slave(bond, slave, i) {
+		if (sdev == slave->dev)
+			/*
+			 * We don't need to check the matching
+			 * slave for dups, since we're overwriting it
+			 */
+			update_slave = slave;
+		else if (qid && qid == slave->queue_id) {
+			goto err_no_cmd_unlock;
+		}
+	}
+
+	if (!update_slave)
+		goto err_no_cmd_unlock;
+
+	/* Actually set the qids for the slave */
+	update_slave->queue_id = qid;
+
+	read_unlock(&bond->lock);
+out:
+	rtnl_unlock();
+	return ret;
+
+err_no_cmd_unlock:
+	read_unlock(&bond->lock);
+err_no_cmd:
+	pr_info("invalid input for queue_id set for %s.\n",
+		bond->dev->name);
+	ret = -EPERM;
+	goto out;
+}
+
+static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
+		   bonding_store_queue_id);
+
+
+/*
+ * Show and set the all_slaves_active flag.
+ */
+static ssize_t bonding_show_slaves_active(struct device *d,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct bonding *bond = to_bond(d);
+
+	return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+}
+
+static ssize_t bonding_store_slaves_active(struct device *d,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	int i, new_value, ret = count;
+	struct bonding *bond = to_bond(d);
+	struct slave *slave;
+
+	if (sscanf(buf, "%d", &new_value) != 1) {
+		pr_err("%s: no all_slaves_active value specified.\n",
+		       bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (new_value == bond->params.all_slaves_active)
+		goto out;
+
+	if ((new_value == 0) || (new_value == 1)) {
+		bond->params.all_slaves_active = new_value;
+	} else {
+		pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
+			bond->dev->name, new_value);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bond_for_each_slave(bond, slave, i) {
+		if (slave->state == BOND_STATE_BACKUP) {
+			if (new_value)
+				slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
+			else
+				slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+		}
+	}
+out:
+	return count;
+}
+static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
+		   bonding_show_slaves_active, bonding_store_slaves_active);

 static struct attribute *per_bond_attrs[] = {
 	&dev_attr_slaves.attr,
@@ -1499,6 +1604,8 @@ static struct attribute *per_bond_attrs[] = {
 	&dev_attr_ad_actor_key.attr,
 	&dev_attr_ad_partner_key.attr,
 	&dev_attr_ad_partner_mac.attr,
+	&dev_attr_queue_id.attr,
+	&dev_attr_all_slaves_active.attr,
 	NULL,
 };
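The two attributes just added to per_bond_attrs surface the new queue_id mapping and the all_slaves_active flag through sysfs. A hedged user-space sketch of setting them; bond0, eth0 and the queue number are illustrative:

/* Illustrative only: maps eth0 to tx queue 2 and enables all_slaves_active. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int bonding_write(const char *bond, const char *attr, const char *val)
{
	char path[128];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/class/net/%s/bonding/%s", bond, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	/* "ifname:qid" must name an enslaved device and a qid within
	 * bond->params.tx_queues, as bonding_store_queue_id() checks. */
	bonding_write("bond0", "queue_id", "eth0:2");
	/* Only 0 or 1 are accepted by bonding_store_slaves_active(). */
	return bonding_write("bond0", "all_slaves_active", "1");
}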
 
 

+ 10 - 4
drivers/net/bonding/bonding.h

@@ -23,8 +23,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"

-#define DRV_VERSION	"3.6.0"
-#define DRV_RELDATE	"September 26, 2009"
+#define DRV_VERSION	"3.7.0"
+#define DRV_RELDATE	"June 2, 2010"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"

@@ -60,6 +60,9 @@
 		 ((mode) == BOND_MODE_TLB)          ||	\
 		 ((mode) == BOND_MODE_ALB))

+#define TX_QUEUE_OVERRIDE(mode)				\
+			(((mode) == BOND_MODE_ACTIVEBACKUP) ||	\
+			 ((mode) == BOND_MODE_ROUNDROBIN))
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -131,6 +134,8 @@ struct bond_params {
 	char primary[IFNAMSIZ];
 	int primary_reselect;
 	__be32 arp_targets[BOND_MAX_ARP_TARGETS];
+	int tx_queues;
+	int all_slaves_active;
 };

 struct bond_parm_tbl {
@@ -159,12 +164,12 @@ struct slave {
 	s8     link;    /* one of BOND_LINK_XXXX */
 	s8     new_link;
 	s8     state;   /* one of BOND_STATE_XXXX */
-	u32    original_flags;
 	u32    original_mtu;
 	u32    link_failure_count;
 	u8     perm_hwaddr[ETH_ALEN];
 	u16    speed;
 	u8     duplex;
+	u16    queue_id;
 	struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
 	struct tlb_slave_info tlb_info;
 };
@@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave)
 	struct bonding *bond = netdev_priv(slave->dev->master);
 	if (!bond_is_lb(bond))
 		slave->state = BOND_STATE_BACKUP;
-	slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+	if (!bond->params.all_slaves_active)
+		slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
 	if (slave_do_arp_validate(bond, slave))
 		slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
 }
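bonding.h now carries the plumbing for queue override: TX_QUEUE_OVERRIDE() names the modes that honor it, bond_params gains tx_queues and all_slaves_active, and each slave records a queue_id. The sketch below only models the selection idea; the real transmit-path changes live in bond_main.c and differ in detail:

/* Stand-alone model of matching a packet's tx queue to a slave's queue_id;
 * types and the fallback policy are simplified stand-ins. */
#include <stdio.h>

struct slave_model {
	const char *name;
	unsigned short queue_id;	/* 0 means no explicit mapping */
};

static const struct slave_model *pick_slave(const struct slave_model *s,
					    int n, unsigned short txq)
{
	int i;

	for (i = 0; i < n; i++)
		if (txq && s[i].queue_id == txq)
			return &s[i];
	return n ? &s[0] : NULL;	/* fall back to normal mode policy */
}

int main(void)
{
	struct slave_model slaves[] = { { "eth0", 1 }, { "eth1", 2 } };
	const struct slave_model *s = pick_slave(slaves, 2, 2);

	printf("txq 2 -> %s\n", s ? s->name : "none");
	return 0;
}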

+ 9 - 2
drivers/net/caif/caif_serial.c

@@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
 	struct ser_device *ser;
 	int ret;
 	u8 *p;
+
 	ser = tty->disc_data;

 	/*
@@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser)
 	struct tty_struct *tty;
 	struct sk_buff *skb;
 	int tty_wr, len, room;
+
 	tty = ser->tty;
 	ser->tx_started = true;

@@ -281,6 +283,7 @@ error:
 static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ser_device *ser;
+
 	BUG_ON(dev == NULL);
 	ser = netdev_priv(dev);

@@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
 static void ldisc_tx_wakeup(struct tty_struct *tty)
 {
 	struct ser_device *ser;
+
 	ser = tty->disc_data;
 	BUG_ON(ser == NULL);
 	BUG_ON(ser->tty != tty);
@@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty)
 	struct ser_device *ser = tty->disc_data;
 	/* Remove may be called inside or outside of rtnl_lock */
 	int islocked = rtnl_is_locked();
+
 	if (!islocked)
 		rtnl_lock();
 	/* device is freed automagically by net-sysfs */
@@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = {
 static int register_ldisc(void)
 {
 	int result;
+
 	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
 	if (result < 0) {
 		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
@@ -391,6 +397,7 @@ static const struct net_device_ops netdev_ops = {
 static void caifdev_setup(struct net_device *dev)
 {
 	struct ser_device *serdev = netdev_priv(dev);
+
 	dev->features = 0;
 	dev->netdev_ops = &netdev_ops;
 	dev->type = ARPHRD_CAIF;
@@ -410,8 +417,6 @@ static void caifdev_setup(struct net_device *dev)

 static int caif_net_open(struct net_device *dev)
 {
-	struct ser_device *ser;
-	ser = netdev_priv(dev);
 	netif_wake_queue(dev);
 	return 0;
 }
@@ -425,6 +430,7 @@ static int caif_net_close(struct net_device *dev)
 static int __init caif_ser_init(void)
 {
 	int ret;
+
 	ret = register_ldisc();
 	debugfsdir = debugfs_create_dir("caif_serial", NULL);
 	return ret;
@@ -435,6 +441,7 @@ static void __exit caif_ser_exit(void)
 	struct ser_device *ser = NULL;
 	struct list_head *node;
 	struct list_head *_tmp;
+
 	list_for_each_safe(node, _tmp, &ser_list) {
 		ser = list_entry(node, struct ser_device, node);
 		dev_close(ser->dev);

+ 5 - 5
drivers/net/can/mscan/mpc5xxx_can.c

@@ -73,7 +73,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
 	else
 		*mscan_clksrc = MSCAN_CLKSRC_XTAL;

-	freq = mpc5xxx_get_bus_frequency(ofdev->node);
+	freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
 	if (!freq)
 		return 0;

@@ -152,7 +152,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
 	}

 	/* Determine the MSCAN device index from the physical address */
-	pval = of_get_property(ofdev->node, "reg", &plen);
+	pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
 	BUG_ON(!pval || plen < sizeof(*pval));
 	clockidx = (*pval & 0x80) ? 1 : 0;
 	if (*pval & 0x2000)
@@ -168,11 +168,11 @@
 	 */
 	if (clock_name && !strcmp(clock_name, "ip")) {
 		*mscan_clksrc = MSCAN_CLKSRC_IPS;
-		freq = mpc5xxx_get_bus_frequency(ofdev->node);
+		freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
 	} else {
 		*mscan_clksrc = MSCAN_CLKSRC_BUS;

-		pval = of_get_property(ofdev->node,
+		pval = of_get_property(ofdev->dev.of_node,
 				       "fsl,mscan-clock-divider", &plen);
 		if (pval && plen == sizeof(*pval))
 			clockdiv = *pval;
@@ -251,7 +251,7 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
 				       const struct of_device_id *id)
 {
 	struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct net_device *dev;
 	struct mscan_priv *priv;
 	void __iomem *base;

+ 1 - 1
drivers/net/can/mscan/mscan.h

@@ -227,7 +227,7 @@ struct mscan_regs {
 		u16 time;			/* + 0x7c     0x3e */
 	} tx;
 	_MSCAN_RESERVED_(32, 2);		/* + 0x7e          */
-} __attribute__ ((packed));
+} __packed;

 #undef _MSCAN_RESERVED_
 #define MSCAN_REGION 	sizeof(struct mscan)

+ 1 - 1
drivers/net/can/usb/ems_usb.c

@@ -197,7 +197,7 @@ struct cpc_can_err_counter {
 };

 /* Main message type used between library and application */
-struct __attribute__ ((packed)) ems_cpc_msg {
+struct __packed ems_cpc_msg {
 	u8 type;	/* type of message */
 	u8 length;	/* length of data within union 'msg' */
 	u8 msgid;	/* confirmation handle */

+ 0 - 1
drivers/net/chelsio/common.h

@@ -286,7 +286,6 @@ struct board_info {
 	unsigned int            clock_mc3;
 	unsigned int            clock_mc4;
 	unsigned int            espi_nports;
-	unsigned int            clock_cspi;
 	unsigned int            clock_elmer0;
 	unsigned char           mdio_mdien;
 	unsigned char           mdio_mdiinv;

+ 2 - 47
drivers/net/chelsio/subr.c

@@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter)
 	return 0;
 }

-#ifdef CONFIG_CHELSIO_T1_COUGAR
-#include "cspi.h"
-#endif
 #ifdef CONFIG_CHELSIO_T1_1G
 #include "fpga_defs.h"

@@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
 	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
 }

-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
 /*
  * Elmer MI1 MDIO read/write operations.
  */
@@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
 	return 0;
 }

-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
 static const struct mdio_ops mi1_mdio_ops = {
 	.init = mi1_mdio_init,
 	.read = mi1_mdio_read,
@@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
 					 mod_detect ? "removed" : "inserted");
 		}
 		break;
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	case CHBT_BOARD_COUGAR:
-		if (adapter->params.nports == 1) {
-			if (cause & ELMER0_GP_BIT1) {         /* Vitesse MAC */
-				struct cmac *mac = adapter->port[0].mac;
-				mac->ops->interrupt_handler(mac);
-			}
-			if (cause & ELMER0_GP_BIT5) {     /* XPAK MOD_DETECT */
-			}
-		} else {
-			int i, port_bit;
-
-			for_each_port(adapter, i) {
-				port_bit = i ? i + 1 : 0;
-				if (!(cause & (1 << port_bit)))
-					continue;
-
-				phy = adapter->port[i].phy;
-				phy_cause = phy->ops->interrupt_handler(phy);
-				if (phy_cause & cphy_cause_link_change)
-					t1_link_changed(adapter, i);
-			}
-		}
-		break;
-#endif
 	}
 	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
 	return 0;
@@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
 	case CHBT_BOARD_N110:
 	case CHBT_BOARD_N210:
 	case CHBT_BOARD_CHT210:
-	case CHBT_BOARD_COUGAR:
 		t1_tpi_par(adapter, 0xf);
 		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
 		break;
@@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter)
 		       adapter->regs + A_MC5_CONFIG);
 	}

-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (adapter->cspi && t1_cspi_init(adapter->cspi))
-		goto out_err;
-#endif
 	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
 					  bi->espi_nports))
 		goto out_err;
@@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter)
 		t1_tp_destroy(adapter->tp);
 	if (adapter->espi)
 		t1_espi_destroy(adapter->espi);
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (adapter->cspi)
-		t1_cspi_destroy(adapter->cspi);
-#endif
 }

 static void __devinit init_link_config(struct link_config *lc,
@@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc,
 	}
 }

-#ifdef CONFIG_CHELSIO_T1_COUGAR
-	if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
-		pr_err("%s: CSPI initialization failed\n",
-		       adapter->name);
-		goto error;
-	}
-#endif
-
 /*
  * Allocate and initialize the data structures that hold the SW state of
  * the Terminator HW modules.

+ 1 - 1
drivers/net/cnic.c

@@ -2824,7 +2824,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,

 	err = ip_route_output_key(&init_net, &rt, &fl);
 	if (!err)
-		*dst = &rt->u.dst;
+		*dst = &rt->dst;
 	return err;
 #else
 	return -ENETUNREACH;

+ 1 - 1
drivers/net/dm9000.c

@@ -961,7 +961,7 @@ struct dm9000_rxhdr {
 	u8	RxPktReady;
 	u8	RxStatus;
 	__le16	RxLen;
-} __attribute__((__packed__));
+} __packed;

 /*
  *  Received a packet and pass to upper layer

+ 8 - 9
drivers/net/e1000/e1000_main.c

@@ -1047,15 +1047,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		goto err_register;

 	/* print bus type/speed/width info */
-	e_info("(PCI%s:%s:%s) ",
-		((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
-		((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
-		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
-		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
-		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
-		((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
-
-	e_info("%pM\n", netdev->dev_addr);
+	e_info("(PCI%s:%dMHz:%d-bit) %pM\n",
+	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
+		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
+		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
+		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
+	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
+	       netdev->dev_addr);

 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);

+ 1 - 1
drivers/net/e1000e/netdev.c

@@ -2554,7 +2554,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 			mdef = er32(MDEF(i));

 			/* Ignore filters with anything other than IPMI ports */
-			if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
 				continue;

 			/* Enable this decision filter in MANC2H */
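The one-character e1000e change above is easy to misread: `mdef & !(A | B)` first collapses the mask to 0 or 1, so the test could never fire, while `mdef & ~(A | B)` really does check for bits outside the two allowed ports. A tiny stand-alone check with made-up values:

#include <assert.h>

#define PORT_623 0x01
#define PORT_664 0x02

int main(void)
{
	unsigned int mdef = PORT_623 | 0x10;	/* one bit outside the allowed ports */

	/* Old expression: !(mask) is 0, so the AND never detects anything. */
	assert((mdef & !(PORT_623 | PORT_664)) == 0);
	/* Fixed expression: the stray 0x10 bit is caught. */
	assert((mdef & ~(PORT_623 | PORT_664)) == 0x10);
	return 0;
}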

+ 1 - 1
drivers/net/ehea/ehea_qmr.h

@@ -126,7 +126,7 @@ struct ehea_swqe {
 			u8 immediate_data[SWQE2_MAX_IMM];
 			/* 0xd0 */
 			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
-		} immdata_desc __attribute__ ((packed));
+		} immdata_desc __packed;

 		/*  Send WQE Format 3 */
 		struct {

+ 7 - 0
drivers/net/enic/enic.h

@@ -74,7 +74,14 @@ struct enic_msix_entry {
 	void *devid;
 };

+#define ENIC_SET_APPLIED		(1 << 0)
+#define ENIC_SET_REQUEST		(1 << 1)
+#define ENIC_SET_NAME			(1 << 2)
+#define ENIC_SET_INSTANCE		(1 << 3)
+#define ENIC_SET_HOST			(1 << 4)
+
 struct enic_port_profile {
+	u32 set;
 	u8 request;
 	char name[PORT_PROFILE_MAX];
 	u8 instance_uuid[PORT_UUID_MAX];

+ 97 - 103
drivers/net/enic/enic_main.c

@@ -1029,8 +1029,7 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
 	return err;
 }

-static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
-	char *name, u8 *instance_uuid, u8 *host_uuid)
+static int enic_set_port_profile(struct enic *enic, u8 *mac)
 {
 	struct vic_provinfo *vp;
 	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
@@ -1040,97 +1039,112 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
 		"%02X%02X-%02X%02X%02X%02X%0X%02X";
 	int err;

-	if (!name)
-		return -EINVAL;
+	err = enic_vnic_dev_deinit(enic);
+	if (err)
+		return err;

-	if (!is_valid_ether_addr(mac))
-		return -EADDRNOTAVAIL;
+	switch (enic->pp.request) {

-	vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
-	if (!vp)
-		return -ENOMEM;
+	case PORT_REQUEST_ASSOCIATE:

-	vic_provinfo_add_tlv(vp,
-		VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
-		strlen(name) + 1, name);
-
-	vic_provinfo_add_tlv(vp,
-		VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
-		ETH_ALEN, mac);
-
-	if (instance_uuid) {
-		uuid = instance_uuid;
-		sprintf(uuid_str, uuid_fmt,
-			uuid[0],  uuid[1],  uuid[2],  uuid[3],
-			uuid[4],  uuid[5],  uuid[6],  uuid[7],
-			uuid[8],  uuid[9],  uuid[10], uuid[11],
-			uuid[12], uuid[13], uuid[14], uuid[15]);
-		vic_provinfo_add_tlv(vp,
-			VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
-			sizeof(uuid_str), uuid_str);
-	}
+		if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
+			return -EINVAL;

-	if (host_uuid) {
-		uuid = host_uuid;
-		sprintf(uuid_str, uuid_fmt,
-			uuid[0],  uuid[1],  uuid[2],  uuid[3],
-			uuid[4],  uuid[5],  uuid[6],  uuid[7],
-			uuid[8],  uuid[9],  uuid[10], uuid[11],
-			uuid[12], uuid[13], uuid[14], uuid[15]);
-		vic_provinfo_add_tlv(vp,
-			VIC_LINUX_PROV_TLV_HOST_UUID_STR,
-			sizeof(uuid_str), uuid_str);
-	}
+		if (!is_valid_ether_addr(mac))
+			return -EADDRNOTAVAIL;

-	err = enic_vnic_dev_deinit(enic);
-	if (err)
-		goto err_out;
+		vp = vic_provinfo_alloc(GFP_KERNEL, oui,
+			VIC_PROVINFO_LINUX_TYPE);
+		if (!vp)
+			return -ENOMEM;

-	memset(&enic->pp, 0, sizeof(enic->pp));
+		vic_provinfo_add_tlv(vp,
+			VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+			strlen(enic->pp.name) + 1, enic->pp.name);

-	err = enic_dev_init_prov(enic, vp);
-	if (err)
-		goto err_out;
+		vic_provinfo_add_tlv(vp,
+			VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
+			ETH_ALEN, mac);
+
+		if (enic->pp.set & ENIC_SET_INSTANCE) {
+			uuid = enic->pp.instance_uuid;
+			sprintf(uuid_str, uuid_fmt,
+				uuid[0],  uuid[1],  uuid[2],  uuid[3],
+				uuid[4],  uuid[5],  uuid[6],  uuid[7],
+				uuid[8],  uuid[9],  uuid[10], uuid[11],
+				uuid[12], uuid[13], uuid[14], uuid[15]);
+			vic_provinfo_add_tlv(vp,
+				VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+				sizeof(uuid_str), uuid_str);
+		}

-	enic->pp.request = request;
-	memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
-	if (instance_uuid)
-		memcpy(enic->pp.instance_uuid,
-			instance_uuid, PORT_UUID_MAX);
-	if (host_uuid)
-		memcpy(enic->pp.host_uuid,
-			host_uuid, PORT_UUID_MAX);
+		if (enic->pp.set & ENIC_SET_HOST) {
+			uuid = enic->pp.host_uuid;
+			sprintf(uuid_str, uuid_fmt,
+				uuid[0],  uuid[1],  uuid[2],  uuid[3],
+				uuid[4],  uuid[5],  uuid[6],  uuid[7],
+				uuid[8],  uuid[9],  uuid[10], uuid[11],
+				uuid[12], uuid[13], uuid[14], uuid[15]);
+			vic_provinfo_add_tlv(vp,
+				VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+				sizeof(uuid_str), uuid_str);
+		}

-err_out:
-	vic_provinfo_free(vp);
+		err = enic_dev_init_prov(enic, vp);
+		vic_provinfo_free(vp);
+		if (err)
+			return err;
+		break;

-	return err;
-}
+	case PORT_REQUEST_DISASSOCIATE:
+		break;

-static int enic_unset_port_profile(struct enic *enic)
-{
-	memset(&enic->pp, 0, sizeof(enic->pp));
-	return enic_vnic_dev_deinit(enic);
+	default:
+		return -EINVAL;
+	}
+
+	enic->pp.set |= ENIC_SET_APPLIED;
+	return 0;
 }

 static int enic_set_vf_port(struct net_device *netdev, int vf,
 	struct nlattr *port[])
 {
 	struct enic *enic = netdev_priv(netdev);
-	char *name = NULL;
-	u8 *instance_uuid = NULL;
-	u8 *host_uuid = NULL;
-	u8 request = PORT_REQUEST_DISASSOCIATE;
+
+	memset(&enic->pp, 0, sizeof(enic->pp));
+
+	if (port[IFLA_PORT_REQUEST]) {
+		enic->pp.set |= ENIC_SET_REQUEST;
+		enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+	}
+
+	if (port[IFLA_PORT_PROFILE]) {
+		enic->pp.set |= ENIC_SET_NAME;
+		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+			PORT_PROFILE_MAX);
+	}
+
+	if (port[IFLA_PORT_INSTANCE_UUID]) {
+		enic->pp.set |= ENIC_SET_INSTANCE;
+		memcpy(enic->pp.instance_uuid,
+			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+	}
+
+	if (port[IFLA_PORT_HOST_UUID]) {
+		enic->pp.set |= ENIC_SET_HOST;
+		memcpy(enic->pp.host_uuid,
+			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+	}

 	/* don't support VFs, yet */
 	if (vf != PORT_SELF_VF)
 		return -EOPNOTSUPP;

-	if (port[IFLA_PORT_REQUEST])
-		request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+	if (!(enic->pp.set & ENIC_SET_REQUEST))
+		return -EOPNOTSUPP;

-	switch (request) {
-	case PORT_REQUEST_ASSOCIATE:
+	if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {

 		/* If the interface mac addr hasn't been assigned,
 		 * assign a random mac addr before setting port-
@@ -1139,30 +1153,9 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,

 		if (is_zero_ether_addr(netdev->dev_addr))
 			random_ether_addr(netdev->dev_addr);
-
-		if (port[IFLA_PORT_PROFILE])
-			name = nla_data(port[IFLA_PORT_PROFILE]);
-
-		if (port[IFLA_PORT_INSTANCE_UUID])
-			instance_uuid =
-				nla_data(port[IFLA_PORT_INSTANCE_UUID]);
-
-		if (port[IFLA_PORT_HOST_UUID])
-			host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
-
-		return enic_set_port_profile(enic, request,
-			netdev->dev_addr, name,
-			instance_uuid, host_uuid);
-
-	case PORT_REQUEST_DISASSOCIATE:
-
-		return enic_unset_port_profile(enic);
-
-	default:
-		break;
 	}

-	return -EOPNOTSUPP;
+	return enic_set_port_profile(enic, netdev->dev_addr);
 }

 static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1172,14 +1165,12 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
 	int err, error, done;
 	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

-	/* don't support VFs, yet */
-	if (vf != PORT_SELF_VF)
-		return -EOPNOTSUPP;
+	if (!(enic->pp.set & ENIC_SET_APPLIED))
+		return -ENODATA;

 	err = enic_dev_init_done(enic, &done, &error);
-
 	if (err)
-		return err;
+		error = err;

 	switch (error) {
 	case ERR_SUCCESS:
@@ -1202,12 +1193,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,

 	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
 	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
-	NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
-		enic->pp.name);
-	NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
-		enic->pp.instance_uuid);
-	NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
-		enic->pp.host_uuid);
+	if (enic->pp.set & ENIC_SET_NAME)
+		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+			enic->pp.name);
+	if (enic->pp.set & ENIC_SET_INSTANCE)
+		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+			enic->pp.instance_uuid);
+	if (enic->pp.set & ENIC_SET_HOST)
+		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+			enic->pp.host_uuid);

 	return 0;
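enic_get_vf_port() now reports only the attributes that were actually supplied, keyed off the pp.set bitmask added in enic.h. A stand-alone illustration of that record-then-report pattern; the names here are not the driver's:

#include <stdio.h>

#define SET_NAME     (1 << 0)
#define SET_INSTANCE (1 << 1)

struct profile {
	unsigned int set;
	char name[32];
};

/* Emit a field only if the matching bit was recorded, mirroring the
 * conditional NLA_PUT() calls above. */
static void report(const struct profile *pp)
{
	if (pp->set & SET_NAME)
		printf("profile=%s\n", pp->name);
	if (pp->set & SET_INSTANCE)
		printf("instance uuid present\n");
}

int main(void)
{
	struct profile pp = { .set = SET_NAME, .name = "webserver" };

	report(&pp);
	return 0;
}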
 
 

+ 1 - 1
drivers/net/enic/vnic_dev.c

@@ -709,7 +709,7 @@ int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
 	u64 a0, a1 = len;
 	int wait = 1000;
-	u64 prov_pa;
+	dma_addr_t prov_pa;
 	void *prov_buf;
 	int ret;


+ 3 - 2
drivers/net/enic/vnic_vic.c

@@ -25,9 +25,10 @@

 struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
 {
-	struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+	struct vic_provinfo *vp;

-	if (!vp || !oui)
+	vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+	if (!vp)
 		return NULL;

 	memcpy(vp->oui, oui, sizeof(vp->oui));

+ 1 - 1
drivers/net/enic/vnic_vic.h

@@ -44,7 +44,7 @@ struct vic_provinfo {
 		u16 length;
 		u8 value[0];
 	} tlv[0];
-} __attribute__ ((packed));
+} __packed;

 #define VIC_PROVINFO_MAX_DATA		1385
 #define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \

+ 4 - 3
drivers/net/epic100.c

@@ -87,6 +87,7 @@ static int rx_copybreak;
 #include <linux/bitops.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/byteorder.h>

 /* These identify the driver base version and may not be removed. */
 static char version[] __devinitdata =
@@ -230,7 +231,7 @@ static const u16 media2miictl[16] = {
  * The EPIC100 Rx and Tx buffer descriptors.  Note that these
  * really ARE host-endian; it's not a misannotation.  We tell
  * the card to byteswap them internally on big-endian hosts -
- * look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
+ * look for #ifdef __BIG_ENDIAN in epic_open().
  */

 struct epic_tx_desc {
@@ -690,7 +691,7 @@ static int epic_open(struct net_device *dev)
 		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

 	/* Tell the chip to byteswap descriptors on big-endian hosts */
-#ifdef CONFIG_BIG_ENDIAN
+#ifdef __BIG_ENDIAN
 	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
 	inl(ioaddr + GENCTL);
 	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@@ -806,7 +807,7 @@ static void epic_restart(struct net_device *dev)
 	for (i = 16; i > 0; i--)
 		outl(0x0008, ioaddr + TEST1);

-#ifdef CONFIG_BIG_ENDIAN
+#ifdef __BIG_ENDIAN
 	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
 #else
 	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);

+ 44 - 59
drivers/net/ethoc.c

@@ -180,6 +180,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
  * @dty_tx:	last buffer actually sent
  * @num_rx:	number of receive buffers
  * @cur_rx:	current receive buffer
+ * @vma:        pointer to array of virtual memory addresses for buffers
  * @netdev:	pointer to network device structure
  * @napi:	NAPI structure
  * @stats:	network device statistics
@@ -203,6 +204,8 @@ struct ethoc {
 	unsigned int num_rx;
 	unsigned int cur_rx;

+	void** vma;
+
 	struct net_device *netdev;
 	struct napi_struct napi;
 	struct net_device_stats stats;
@@ -285,18 +288,22 @@ static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 	ethoc_write(dev, MODER, mode);
 }

-static int ethoc_init_ring(struct ethoc *dev)
+static int ethoc_init_ring(struct ethoc *dev, void* mem_start)
 {
 	struct ethoc_bd bd;
 	int i;
+	void* vma;

 	dev->cur_tx = 0;
 	dev->dty_tx = 0;
 	dev->cur_rx = 0;

+	ethoc_write(dev, TX_BD_NUM, dev->num_tx);
+
 	/* setup transmission buffers */
-	bd.addr = virt_to_phys(dev->membase);
+	bd.addr = mem_start;
 	bd.stat = TX_BD_IRQ | TX_BD_CRC;
+	vma = dev->membase;

 	for (i = 0; i < dev->num_tx; i++) {
 		if (i == dev->num_tx - 1)
@@ -304,6 +311,9 @@ static int ethoc_init_ring(struct ethoc *dev)

 		ethoc_write_bd(dev, i, &bd);
 		bd.addr += ETHOC_BUFSIZ;
+
+		dev->vma[i] = vma;
+		vma += ETHOC_BUFSIZ;
 	}

 	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
@@ -314,6 +324,9 @@ static int ethoc_init_ring(struct ethoc *dev)

 		ethoc_write_bd(dev, dev->num_tx + i, &bd);
 		bd.addr += ETHOC_BUFSIZ;
+
+		dev->vma[dev->num_tx + i] = vma;
+		vma += ETHOC_BUFSIZ;
 	}

 	return 0;
@@ -415,7 +428,7 @@ static int ethoc_rx(struct net_device *dev, int limit)
 			skb = netdev_alloc_skb_ip_align(dev, size);

 			if (likely(skb)) {
-				void *src = phys_to_virt(bd.addr);
+				void *src = priv->vma[entry];
 				memcpy_fromio(skb_put(skb, size), src, size);
 				skb->protocol = eth_type_trans(skb, dev);
 				priv->stats.rx_packets++;
@@ -600,8 +613,11 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)

 	while (time_before(jiffies, timeout)) {
 		u32 stat = ethoc_read(priv, MIISTATUS);
-		if (!(stat & MIISTATUS_BUSY))
+		if (!(stat & MIISTATUS_BUSY)) {
+			/* reset MII command register */
+			ethoc_write(priv, MIICOMMAND, 0);
 			return 0;
+		}

 		schedule();
 	}
@@ -622,21 +638,12 @@ static int ethoc_mdio_probe(struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
 	struct phy_device *phy;
-	int i;
+	int err;

-	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		phy = priv->mdio->phy_map[i];
-		if (phy) {
-			if (priv->phy_id != -1) {
-				/* attach to specified PHY */
-				if (priv->phy_id == phy->addr)
-					break;
-			} else {
-				/* autoselect PHY if none was specified */
-				if (phy->addr != 0)
-					break;
-			}
-		}
+	if (priv->phy_id != -1) {
+		phy = priv->mdio->phy_map[priv->phy_id];
+	} else {
+		phy = phy_find_first(priv->mdio);
 	}

 	if (!phy) {
@@ -644,11 +651,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
 		return -ENXIO;
 	}

-	phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
+	err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
 			PHY_INTERFACE_MODE_GMII);
-	if (IS_ERR(phy)) {
+	if (err) {
 		dev_err(&dev->dev, "could not attach to PHY\n");
-		return PTR_ERR(phy);
+		return err;
 	}

 	priv->phy = phy;
@@ -658,8 +665,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
 static int ethoc_open(struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
-	unsigned int min_tx = 2;
-	unsigned int num_bd;
 	int ret;

 	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
@@ -667,14 +672,7 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;

-	/* calculate the number of TX/RX buffers, maximum 128 supported */
-	num_bd = min_t(unsigned int,
-		128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
-	priv->num_tx = max(min_tx, num_bd / 4);
-	priv->num_rx = num_bd - priv->num_tx;
-	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
-
-	ethoc_init_ring(priv);
+	ethoc_init_ring(priv, (void*)dev->mem_start);
 	ethoc_reset(priv);

 	if (netif_queue_stopped(dev)) {
@@ -838,7 +836,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		bd.stat &= ~TX_BD_PAD;

-	dest = phys_to_virt(bd.addr);
+	dest = priv->vma[entry];
 	memcpy_toio(dest, skb->data, skb->len);

 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -884,6 +882,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	struct resource *mem = NULL;
 	struct ethoc *priv = NULL;
 	unsigned int phy;
+	int num_bd;
 	int ret = 0;

 	/* allocate networking device */
@@ -965,7 +964,7 @@ static int ethoc_probe(struct platform_device *pdev)
 		}
 	} else {
 		/* Allocate buffer memory */
-		priv->membase = dma_alloc_coherent(NULL,
+		priv->membase = dmam_alloc_coherent(&pdev->dev,
 			buffer_size, (void *)&netdev->mem_start,
 			GFP_KERNEL);
 		if (!priv->membase) {
@@ -978,6 +977,18 @@ static int ethoc_probe(struct platform_device *pdev)
 		priv->dma_alloc = buffer_size;
 	}

+	/* calculate the number of TX/RX buffers, maximum 128 supported */
+	num_bd = min_t(unsigned int,
+		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
+	priv->num_tx = max(2, num_bd / 4);
+	priv->num_rx = num_bd - priv->num_tx;
+
+	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
+	if (!priv->vma) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
 	/* Allow the platform setup code to pass in a MAC address. */
 	if (pdev->dev.platform_data) {
 		struct ethoc_platform_data *pdata =
@@ -1063,21 +1074,6 @@ free_mdio:
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
-	if (priv) {
-		if (priv->dma_alloc)
-			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
-					  netdev->mem_start);
-		else if (priv->membase)
-			devm_iounmap(&pdev->dev, priv->membase);
-		if (priv->iobase)
-			devm_iounmap(&pdev->dev, priv->iobase);
-	}
-	if (mem)
-		devm_release_mem_region(&pdev->dev, mem->start,
-					mem->end - mem->start + 1);
-	if (mmio)
-		devm_release_mem_region(&pdev->dev, mmio->start,
-					mmio->end - mmio->start + 1);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1104,17 +1100,6 @@ static int ethoc_remove(struct platform_device *pdev)
 			kfree(priv->mdio->irq);
 			mdiobus_free(priv->mdio);
 		}
-		if (priv->dma_alloc)
-			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
-				netdev->mem_start);
-		else {
-			devm_iounmap(&pdev->dev, priv->membase);
-			devm_release_mem_region(&pdev->dev, netdev->mem_start,
-				netdev->mem_end - netdev->mem_start + 1);
-		}
-		devm_iounmap(&pdev->dev, priv->iobase);
-		devm_release_mem_region(&pdev->dev, netdev->base_addr,
-			priv->io_region_size);
 		unregister_netdev(netdev);
 		free_netdev(netdev);
 	}

+ 34 - 22
drivers/net/fec.c

@@ -210,7 +210,7 @@ static void fec_stop(struct net_device *dev);
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)

-static int
+static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
@@ -679,30 +679,24 @@ static int fec_enet_mii_probe(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct phy_device *phy_dev = NULL;
-	int phy_addr;
+	int ret;

 	fep->phy_dev = NULL;

 	/* find the first phy */
-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-		if (fep->mii_bus->phy_map[phy_addr]) {
-			phy_dev = fep->mii_bus->phy_map[phy_addr];
-			break;
-		}
-	}
-
+	phy_dev = phy_find_first(fep->mii_bus);
 	if (!phy_dev) {
 		printk(KERN_ERR "%s: no PHY found\n", dev->name);
 		return -ENODEV;
 	}

 	/* attach the mac to the phy */
-	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+	ret = phy_connect_direct(dev, phy_dev,
 			     &fec_enet_adjust_link, 0,
 			     PHY_INTERFACE_MODE_MII);
-	if (IS_ERR(phy_dev)) {
+	if (ret) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
-		return PTR_ERR(phy_dev);
+		return ret;
 	}

 	/* mask with MAC supported features */
@@ -1365,6 +1359,8 @@ fec_drv_remove(struct platform_device *pdev)
 	return 0;
 }

+#ifdef CONFIG_PM
+
 static int
 fec_suspend(struct platform_device *dev, pm_message_t state)
 {
@@ -1373,10 +1369,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)

 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev)) {
-			netif_device_detach(ndev);
-			fec_stop(ndev);
-		}
+		if (netif_running(ndev))
+			fec_enet_close(ndev);
+		clk_disable(fep->clk);
 	}
 	return 0;
 }
@@ -1385,25 +1380,42 @@ static int
 fec_resume(struct platform_device *dev)
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
+	struct fec_enet_private *fep;

 	if (ndev) {
-		if (netif_running(ndev)) {
-			fec_enet_init(ndev, 0);
-			netif_device_attach(ndev);
-		}
+		fep = netdev_priv(ndev);
+		clk_enable(fep->clk);
+		if (netif_running(ndev))
+			fec_enet_open(ndev);
 	}
 	return 0;
 }

+static const struct dev_pm_ops fec_pm_ops = {
+	.suspend	= fec_suspend,
+	.resume		= fec_resume,
+	.freeze		= fec_suspend,
+	.thaw		= fec_resume,
+	.poweroff	= fec_suspend,
+	.restore	= fec_resume,
+};
+
+#define FEC_PM_OPS (&fec_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define FEC_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
 static struct platform_driver fec_driver = {
 	.driver	= {
 		.name    = "fec",
 		.owner	 = THIS_MODULE,
+		.pm		 = FEC_PM_OPS,
 	},
 	.probe   = fec_probe,
 	.remove  = __devexit_p(fec_drv_remove),
-	.suspend = fec_suspend,
-	.resume  = fec_resume,
 };

 static int __init

+ 5 - 19
drivers/net/fec_mpc52xx_phy.c

@@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
 		int reg, u32 value)
 {
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
-	struct mpc52xx_fec __iomem *fec;
+	struct mpc52xx_fec __iomem *fec = priv->regs;
 	int tries = 3;

 	value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
 	value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;

-	fec = priv->regs;
 	out_be32(&fec->ievent, FEC_IEVENT_MII);
-	out_be32(&priv->regs->mii_data, value);
+	out_be32(&fec->mii_data, value);

 	/* wait for it to finish, this takes about 23 us on lite5200b */
 	while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
@@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
 		return -ETIMEDOUT;

 	return value & FEC_MII_DATA_OP_RD ?
-		in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+		in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
 }

 static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -69,9 +68,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
 	struct device_node *np = of->dev.of_node;
 	struct mii_bus *bus;
 	struct mpc52xx_fec_mdio_priv *priv;
-	struct resource res = {};
+	struct resource res;
 	int err;
-	int i;

 	bus = mdiobus_alloc();
 	if (bus == NULL)
@@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
 	err = of_address_to_resource(np, 0, &res);
 	if (err)
 		goto out_free;
-	priv->regs = ioremap(res.start, res.end - res.start + 1);
+	priv->regs = ioremap(res.start, resource_size(&res));
 	if (priv->regs == NULL) {
 		err = -ENOMEM;
 		goto out_free;
@@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
  out_unmap:
 	iounmap(priv->regs);
  out_free:
-	for (i=0; i<PHY_MAX_ADDR; i++)
-		if (bus->irq[i] != PHY_POLL)
-			irq_dispose_mapping(bus->irq[i]);
-	kfree(bus->irq);
 	kfree(priv);
 	mdiobus_free(bus);

@@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
 	struct device *dev = &of->dev;
 	struct mii_bus *bus = dev_get_drvdata(dev);
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
-	int i;

 	mdiobus_unregister(bus);
 	dev_set_drvdata(dev, NULL);
-
 	iounmap(priv->regs);
-	for (i=0; i<PHY_MAX_ADDR; i++)
-		if (bus->irq[i] != PHY_POLL)
-			irq_dispose_mapping(bus->irq[i]);
 	kfree(priv);
-	kfree(bus->irq);
 	mdiobus_free(bus);

 	return 0;
 }

-
 static struct of_device_id mpc52xx_fec_mdio_match[] = {
 	{ .compatible = "fsl,mpc5200b-mdio", },
 	{ .compatible = "fsl,mpc5200-mdio", },
@@ -171,5 +158,4 @@ struct of_platform_driver mpc52xx_fec_mdio_driver = {
 /* let fec driver call it, since this has to be registered before it */
 EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);

-
 MODULE_LICENSE("Dual BSD/GPL");

+ 1 - 1
drivers/net/fsl_pq_mdio.h

@@ -39,7 +39,7 @@ struct fsl_pq_mdio {
 	u8 reserved[28];	/* Space holder */
 	u32 utbipar;		/* TBI phy address reg (only on UCC) */
 	u8 res4[2728];
-} __attribute__ ((packed));
+} __packed;

 int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);

+ 5 - 6
drivers/net/gianfar.c

@@ -681,8 +681,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 		priv->rx_queue[i] = NULL;

 	for (i = 0; i < priv->num_tx_queues; i++) {
-		priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
-				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+					    GFP_KERNEL);
 		if (!priv->tx_queue[i]) {
 			err = -ENOMEM;
 			goto tx_alloc_failed;
@@ -694,8 +694,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 	}

 	for (i = 0; i < priv->num_rx_queues; i++) {
-		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
-					sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+					    GFP_KERNEL);
 		if (!priv->rx_queue[i]) {
 			err = -ENOMEM;
 			goto rx_alloc_failed;
@@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 			FSL_GIANFAR_DEV_HAS_CSUM |
 			FSL_GIANFAR_DEV_HAS_VLAN |
 			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-			FSL_GIANFAR_DEV_HAS_TIMER;
+			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

 	ctype = of_get_property(np, "phy-connection-type", NULL);


+ 5 - 7
drivers/net/greth.c

@@ -1555,7 +1555,6 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
 	}

 	/* setup NAPI */
-	memset(&greth->napi, 0, sizeof(greth->napi));
 	netif_napi_add(dev, &greth->napi, greth_poll, 64);

 	return 0;
@@ -1607,14 +1606,13 @@ static struct of_device_id greth_of_match[] = {
 MODULE_DEVICE_TABLE(of, greth_of_match);

 static struct of_platform_driver greth_of_driver = {
-	.name = "grlib-greth",
-	.match_table = greth_of_match,
+	.driver = {
+		.name = "grlib-greth",
+		.owner = THIS_MODULE,
+		.of_match_table = greth_of_match,
+	},
 	.probe = greth_of_probe,
 	.remove = __devexit_p(greth_of_remove),
-	.driver = {
-		   .owner = THIS_MODULE,
-		   .name = "grlib-greth",
-		   },
 };

 static int __init greth_init(void)

+ 1 - 1
drivers/net/irda/donauboe.h

@@ -273,7 +273,7 @@ struct OboeSlot
   __u8 control;                 /*Slot control/status see below */
   __u8 control;                 /*Slot control/status see below */
   __u32 address;                /*Slot buffer address */
   __u32 address;                /*Slot buffer address */
 }
 }
-__attribute__ ((packed));
+__packed;
 
 
 #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
 #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
 
 

+ 1 - 1
drivers/net/irda/irda-usb.h

@@ -125,7 +125,7 @@ struct irda_class_desc {
 	__u8  bmAdditionalBOFs;
 	__u8  bmAdditionalBOFs;
 	__u8  bIrdaRateSniff;
 	__u8  bIrdaRateSniff;
 	__u8  bMaxUnicastList;
 	__u8  bMaxUnicastList;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* class specific interface request to get the IrDA-USB class descriptor
 /* class specific interface request to get the IrDA-USB class descriptor
  * (6.2.5, USB-IrDA class spec 1.0) */
  * (6.2.5, USB-IrDA class spec 1.0) */

+ 1 - 1
drivers/net/irda/ks959-sir.c

@@ -154,7 +154,7 @@ struct ks959_speedparams {
 	__le32 baudrate;	/* baud rate, little endian */
 	__le32 baudrate;	/* baud rate, little endian */
 	__u8 flags;
 	__u8 flags;
 	__u8 reserved[3];
 	__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_6_BITS 0x01
 #define KS_DATA_6_BITS 0x01

+ 1 - 1
drivers/net/irda/ksdazzle-sir.c

@@ -117,7 +117,7 @@ struct ksdazzle_speedparams {
 	__le32 baudrate;	/* baud rate, little endian */
 	__le32 baudrate;	/* baud rate, little endian */
 	__u8 flags;
 	__u8 flags;
 	__u8 reserved[3];
 	__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
 
 
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_5_BITS 0x00
 #define KS_DATA_6_BITS 0x01
 #define KS_DATA_6_BITS 0x01

+ 3 - 3
drivers/net/irda/vlsi_ir.h

@@ -544,9 +544,9 @@ struct ring_descr_hw {
 		struct {
 		struct {
 			u8		addr_res[3];
 			u8		addr_res[3];
 			volatile u8	status;		/* descriptor status */
 			volatile u8	status;		/* descriptor status */
-		} __attribute__((packed)) rd_s;
-	} __attribute((packed)) rd_u;
-} __attribute__ ((packed));
+		} __packed rd_s;
+	} __packed rd_u;
+} __packed;
 
 
 #define rd_addr		rd_u.addr
 #define rd_addr		rd_u.addr
 #define rd_status	rd_u.rd_s.status
 #define rd_status	rd_u.rd_s.status

+ 3 - 5
drivers/net/ixgbe/ixgbe.h

@@ -44,11 +44,9 @@
 #include <linux/dca.h>
 #include <linux/dca.h>
 #endif
 #endif
 
 
-#define PFX "ixgbe: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__func__ , ## args)))
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 
 /* TX/RX descriptor defines */
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD		    512
 #define IXGBE_DEFAULT_TXD		    512

+ 2 - 3
drivers/net/ixgbe/ixgbe_82599.c

@@ -707,9 +707,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
 
 out:
 out:
 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-		netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
-			" downgraded the link speed from the maximum"
-			" advertised\n");
+		e_info("Smartspeed has downgraded the link speed from "
+		       "the maximum advertised\n");
 	return status;
 	return status;
 }
 }
 
 

+ 2 - 0
drivers/net/ixgbe/ixgbe_common.c

@@ -1188,6 +1188,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 	} else {
 	} else {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_RAR_INDEX;
 	}
 	}
 
 
 	return 0;
 	return 0;
@@ -1219,6 +1220,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 	} else {
 	} else {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_RAR_INDEX;
 	}
 	}
 
 
 	/* clear VMDq pool/queue selection for this RAR */
 	/* clear VMDq pool/queue selection for this RAR */

+ 20 - 6
drivers/net/ixgbe/ixgbe_common.h

@@ -105,12 +105,26 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
 
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
 
-#ifdef DEBUG
-extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
+extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
 #define hw_dbg(hw, format, arg...) \
-	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(hw, format, arg...) do {} while (0)
-#endif
+	netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+#define e_err(format, arg...) \
+	netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+	netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+	netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+	netdev_notice(adapter->netdev, format, ## arg)
+#define e_crit(format, arg...) \
+	netdev_crit(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+	dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+	dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+	dev_err(&adapter->pdev->dev, format, ## arg)
+#define e_dev_notice(format, arg...) \
+	dev_notice(&adapter->pdev->dev, format, ## arg)
 
 
 #endif /* IXGBE_COMMON */
 #endif /* IXGBE_COMMON */

+ 1 - 1
drivers/net/ixgbe/ixgbe_dcb_nl.c

@@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 			goto out;
 			goto out;
 
 
 		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
 		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-			DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
+			e_err("Enable failed, needs MSI-X\n");
 			err = 1;
 			err = 1;
 			goto out;
 			goto out;
 		}
 		}

+ 24 - 48
drivers/net/ixgbe/ixgbe_ethtool.c

@@ -294,8 +294,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
 		hw->mac.autotry_restart = true;
 		hw->mac.autotry_restart = true;
 		err = hw->mac.ops.setup_link(hw, advertised, true, true);
 		err = hw->mac.ops.setup_link(hw, advertised, true, true);
 		if (err) {
 		if (err) {
-			DPRINTK(PROBE, INFO,
-			        "setup link failed with code %d\n", err);
+			e_info("setup link failed with code %d\n", err);
 			hw->mac.ops.setup_link(hw, old, true, true);
 			hw->mac.ops.setup_link(hw, old, true, true);
 		}
 		}
 	} else {
 	} else {
@@ -1188,9 +1187,9 @@ static struct ixgbe_reg_test reg_test_82598[] = {
 		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
 		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
 		val = readl(adapter->hw.hw_addr + R);                         \
 		val = readl(adapter->hw.hw_addr + R);                         \
 		if (val != (_test[pat] & W & M)) {                            \
 		if (val != (_test[pat] & W & M)) {                            \
-			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
-					  "0x%08X expected 0x%08X\n",         \
-				R, val, (_test[pat] & W & M));                \
+			e_err("pattern test reg %04X failed: got "	\
+			      "0x%08X expected 0x%08X\n",		\
+			      R, val, (_test[pat] & W & M));                \
 			*data = R;                                            \
 			*data = R;                                            \
 			writel(before, adapter->hw.hw_addr + R);              \
 			writel(before, adapter->hw.hw_addr + R);              \
 			return 1;                                             \
 			return 1;                                             \
@@ -1206,8 +1205,8 @@ static struct ixgbe_reg_test reg_test_82598[] = {
 	writel((W & M), (adapter->hw.hw_addr + R));                           \
 	writel((W & M), (adapter->hw.hw_addr + R));                           \
 	val = readl(adapter->hw.hw_addr + R);                                 \
 	val = readl(adapter->hw.hw_addr + R);                                 \
 	if ((W & M) != (val & M)) {                                           \
 	if ((W & M) != (val & M)) {                                           \
-		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
-				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		e_err("set/check reg %04X test failed: got 0x%08X "	\
+		      "expected 0x%08X\n", R, (val & M), (W & M));	\
 		*data = R;                                                    \
 		*data = R;                                                    \
 		writel(before, (adapter->hw.hw_addr + R));                    \
 		writel(before, (adapter->hw.hw_addr + R));                    \
 		return 1;                                                     \
 		return 1;                                                     \
@@ -1240,8 +1239,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
 	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
 	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
 	if (value != after) {
 	if (value != after) {
-		DPRINTK(DRV, ERR, "failed STATUS register test got: "
-		        "0x%08X expected: 0x%08X\n", after, value);
+		e_err("failed STATUS register test got: 0x%08X expected: "
+		      "0x%08X\n", after, value);
 		*data = 1;
 		*data = 1;
 		return 1;
 		return 1;
 	}
 	}
@@ -1341,8 +1340,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		*data = 1;
 		*data = 1;
 		return -1;
 		return -1;
 	}
 	}
-	DPRINTK(HW, INFO, "testing %s interrupt\n",
-		(shared_int ? "shared" : "unshared"));
+	e_info("testing %s interrupt\n", shared_int ?
+		   "shared" : "unshared");
 
 
 	/* Disable all the interrupts */
 	/* Disable all the interrupts */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1847,7 +1846,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 		/* Offline tests */
 
 
-		DPRINTK(HW, INFO, "offline testing starting\n");
+		e_info("offline testing starting\n");
 
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
 		 * interfere with test result */
@@ -1880,17 +1879,17 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		else
 		else
 			ixgbe_reset(adapter);
 			ixgbe_reset(adapter);
 
 
-		DPRINTK(HW, INFO, "register testing starting\n");
+		e_info("register testing starting\n");
 		if (ixgbe_reg_test(adapter, &data[0]))
 		if (ixgbe_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 
 		ixgbe_reset(adapter);
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "eeprom testing starting\n");
+		e_info("eeprom testing starting\n");
 		if (ixgbe_eeprom_test(adapter, &data[1]))
 		if (ixgbe_eeprom_test(adapter, &data[1]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 
 		ixgbe_reset(adapter);
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "interrupt testing starting\n");
+		e_info("interrupt testing starting\n");
 		if (ixgbe_intr_test(adapter, &data[2]))
 		if (ixgbe_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 
@@ -1898,14 +1897,13 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		 * loopback diagnostic. */
 		 * loopback diagnostic. */
 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 				      IXGBE_FLAG_VMDQ_ENABLED)) {
 				      IXGBE_FLAG_VMDQ_ENABLED)) {
-			DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
-				"mode\n");
+			e_info("Skip MAC loopback diagnostic in VT mode\n");
 			data[3] = 0;
 			data[3] = 0;
 			goto skip_loopback;
 			goto skip_loopback;
 		}
 		}
 
 
 		ixgbe_reset(adapter);
 		ixgbe_reset(adapter);
-		DPRINTK(HW, INFO, "loopback testing starting\n");
+		e_info("loopback testing starting\n");
 		if (ixgbe_loopback_test(adapter, &data[3]))
 		if (ixgbe_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 
@@ -1916,7 +1914,7 @@ skip_loopback:
 		if (if_running)
 		if (if_running)
 			dev_open(netdev);
 			dev_open(netdev);
 	} else {
 	} else {
-		DPRINTK(HW, INFO, "online testing starting\n");
+		e_info("online testing starting\n");
 		/* Online tests */
 		/* Online tests */
 		if (ixgbe_link_test(adapter, &data[4]))
 		if (ixgbe_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2077,25 +2075,6 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 	return 0;
 	return 0;
 }
 }
 
 
-/*
- * this function must be called before setting the new value of
- * rx_itr_setting
- */
-static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
-                               struct ethtool_coalesce *ec)
-{
-	/* check the old value and enable RSC if necessary */
-	if ((adapter->rx_itr_setting == 0) &&
-	    (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
-		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-		adapter->netdev->features |= NETIF_F_LRO;
-		DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
-		        ec->rx_coalesce_usecs);
-		return true;
-	}
-	return false;
-}
-
 static int ixgbe_set_coalesce(struct net_device *netdev,
 static int ixgbe_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec)
                               struct ethtool_coalesce *ec)
 {
 {
@@ -2124,9 +2103,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
 		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
 			return -EINVAL;
 			return -EINVAL;
 
 
-		/* check the old value and enable RSC if necessary */
-		need_reset = ixgbe_reenable_rsc(adapter, ec);
-
 		/* store the value in ints/second */
 		/* store the value in ints/second */
 		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
 
@@ -2135,9 +2111,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		/* clear the lower bit as its used for dynamic state */
 		/* clear the lower bit as its used for dynamic state */
 		adapter->rx_itr_setting &= ~1;
 		adapter->rx_itr_setting &= ~1;
 	} else if (ec->rx_coalesce_usecs == 1) {
 	} else if (ec->rx_coalesce_usecs == 1) {
-		/* check the old value and enable RSC if necessary */
-		need_reset = ixgbe_reenable_rsc(adapter, ec);
-
 		/* 1 means dynamic mode */
 		/* 1 means dynamic mode */
 		adapter->rx_eitr_param = 20000;
 		adapter->rx_eitr_param = 20000;
 		adapter->rx_itr_setting = 1;
 		adapter->rx_itr_setting = 1;
@@ -2157,10 +2130,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		 */
 		 */
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
 			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			netdev->features &= ~NETIF_F_LRO;
-			DPRINTK(PROBE, INFO,
-			        "rx-usecs set to 0, disabling RSC\n");
-
+			if (netdev->features & NETIF_F_LRO) {
+				netdev->features &= ~NETIF_F_LRO;
+				e_info("rx-usecs set to 0, disabling RSC\n");
+			}
 			need_reset = true;
 			need_reset = true;
 		}
 		}
 	}
 	}
@@ -2255,6 +2228,9 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 			}
 			}
 		} else if (!adapter->rx_itr_setting) {
 		} else if (!adapter->rx_itr_setting) {
 			netdev->features &= ~ETH_FLAG_LRO;
 			netdev->features &= ~ETH_FLAG_LRO;
+			if (data & ETH_FLAG_LRO)
+				e_info("rx-usecs set to 0, "
+					"LRO/RSC cannot be enabled.\n");
 		}
 		}
 	}
 	}
 
 

+ 16 - 19
drivers/net/ixgbe/ixgbe_fcoe.c

@@ -25,7 +25,6 @@
 
 
 *******************************************************************************/
 *******************************************************************************/
 
 
-
 #include "ixgbe.h"
 #include "ixgbe.h"
 #ifdef CONFIG_IXGBE_DCB
 #ifdef CONFIG_IXGBE_DCB
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_dcb_82599.h"
@@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 
 
 	adapter = netdev_priv(netdev);
 	adapter = netdev_priv(netdev);
 	if (xid >= IXGBE_FCOE_DDP_MAX) {
 	if (xid >= IXGBE_FCOE_DDP_MAX) {
-		DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
+		e_warn("xid=0x%x out-of-range\n", xid);
 		return 0;
 		return 0;
 	}
 	}
 
 
 	fcoe = &adapter->fcoe;
 	fcoe = &adapter->fcoe;
 	if (!fcoe->pool) {
 	if (!fcoe->pool) {
-		DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
+		e_warn("xid=0x%x no ddp pool for fcoe\n", xid);
 		return 0;
 		return 0;
 	}
 	}
 
 
 	ddp = &fcoe->ddp[xid];
 	ddp = &fcoe->ddp[xid];
 	if (ddp->sgl) {
 	if (ddp->sgl) {
-		DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
-			xid, ddp->sgl, ddp->sgc);
+		e_err("xid 0x%x w/ non-null sgl=%p nents=%d\n",
+			  xid, ddp->sgl, ddp->sgc);
 		return 0;
 		return 0;
 	}
 	}
 	ixgbe_fcoe_clear_ddp(ddp);
 	ixgbe_fcoe_clear_ddp(ddp);
@@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* setup dma from scsi command sgl */
 	/* setup dma from scsi command sgl */
 	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
 	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
 	if (dmacount == 0) {
 	if (dmacount == 0) {
-		DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
+		e_err("xid 0x%x DMA map error\n", xid);
 		return 0;
 		return 0;
 	}
 	}
 
 
 	/* alloc the udl from our ddp pool */
 	/* alloc the udl from our ddp pool */
 	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
 	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
 	if (!ddp->udl) {
 	if (!ddp->udl) {
-		DPRINTK(DRV, ERR, "failed allocated ddp context\n");
+		e_err("failed allocated ddp context\n");
 		goto out_noddp_unmap;
 		goto out_noddp_unmap;
 	}
 	}
 	ddp->sgl = sgl;
 	ddp->sgl = sgl;
@@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 		while (len) {
 		while (len) {
 			/* max number of buffers allowed in one DDP context */
 			/* max number of buffers allowed in one DDP context */
 			if (j >= IXGBE_BUFFCNT_MAX) {
 			if (j >= IXGBE_BUFFCNT_MAX) {
-				netif_err(adapter, drv, adapter->netdev,
-					  "xid=%x:%d,%d,%d:addr=%llx "
-					  "not enough descriptors\n",
-					  xid, i, j, dmacount, (u64)addr);
+				e_err("xid=%x:%d,%d,%d:addr=%llx "
+				      "not enough descriptors\n",
+				      xid, i, j, dmacount, (u64)addr);
 				goto out_noddp_free;
 				goto out_noddp_free;
 			}
 			}
 
 
@@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	struct fc_frame_header *fh;
 	struct fc_frame_header *fh;
 
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-			skb_shinfo(skb)->gso_type);
+		e_err("Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+		      skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 		break;
 	default:
 	default:
-		DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof);
+		e_warn("unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 		break;
 	default:
 	default:
-		DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof);
+		e_warn("unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 					     adapter->pdev, IXGBE_FCPTR_MAX,
 					     adapter->pdev, IXGBE_FCPTR_MAX,
 					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
 					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
 		if (!fcoe->pool)
 		if (!fcoe->pool)
-			DPRINTK(DRV, ERR,
-				"failed to allocated FCoE DDP pool\n");
+			e_err("failed to allocated FCoE DDP pool\n");
 
 
 		spin_lock_init(&fcoe->lock);
 		spin_lock_init(&fcoe->lock);
 	}
 	}
@@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 		goto out_enable;
 		goto out_enable;
 
 
-	DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n");
+	e_info("Enabling FCoE offload features.\n");
 	if (netif_running(netdev))
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
 		netdev->netdev_ops->ndo_stop(netdev);
 
 
@@ -660,7 +657,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		goto out_disable;
 		goto out_disable;
 
 
-	DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n");
+	e_info("Disabling FCoE offload features.\n");
 	if (netif_running(netdev))
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
 		netdev->netdev_ops->ndo_stop(netdev);
 
 

+ 133 - 154
drivers/net/ixgbe/ixgbe_main.c

@@ -642,7 +642,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 	u32 txoff = IXGBE_TFCS_TXOFF;
 	u32 txoff = IXGBE_TFCS_TXOFF;
 
 
 #ifdef CONFIG_IXGBE_DCB
 #ifdef CONFIG_IXGBE_DCB
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+	if (adapter->dcb_cfg.pfc_mode_enable) {
 		int tc;
 		int tc;
 		int reg_idx = tx_ring->reg_idx;
 		int reg_idx = tx_ring->reg_idx;
 		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 		/* detected Tx unit hang */
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
 		union ixgbe_adv_tx_desc *tx_desc;
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-			"  Tx Queue             <%d>\n"
-			"  TDH, TDT             <%x>, <%x>\n"
-			"  next_to_use          <%x>\n"
-			"  next_to_clean        <%x>\n"
-			"tx_buffer_info[next_to_clean]\n"
-			"  time_stamp           <%lx>\n"
-			"  jiffies              <%lx>\n",
-			tx_ring->queue_index,
-			IXGBE_READ_REG(hw, tx_ring->head),
-			IXGBE_READ_REG(hw, tx_ring->tail),
-			tx_ring->next_to_use, eop,
-			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+		e_err("Detected Tx Unit Hang\n"
+		      "  Tx Queue             <%d>\n"
+		      "  TDH, TDT             <%x>, <%x>\n"
+		      "  next_to_use          <%x>\n"
+		      "  next_to_clean        <%x>\n"
+		      "tx_buffer_info[next_to_clean]\n"
+		      "  time_stamp           <%lx>\n"
+		      "  jiffies              <%lx>\n",
+		      tx_ring->queue_index,
+		      IXGBE_READ_REG(hw, tx_ring->head),
+		      IXGBE_READ_REG(hw, tx_ring->tail),
+		      tx_ring->next_to_use, eop,
+		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 		return true;
 		return true;
 	}
 	}
 
 
@@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	if (adapter->detect_tx_hung) {
 	if (adapter->detect_tx_hung) {
 		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
 		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
 			/* schedule immediate reset if we believe we hung */
 			/* schedule immediate reset if we believe we hung */
-			DPRINTK(PROBE, INFO,
-			        "tx hang %d detected, resetting adapter\n",
-			        adapter->tx_timeout_count + 1);
+			e_info("tx hang %d detected, resetting adapter\n",
+			       adapter->tx_timeout_count + 1);
 			ixgbe_tx_timeout(adapter->netdev);
 			ixgbe_tx_timeout(adapter->netdev);
 		}
 		}
 	}
 	}
@@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
 				return;
 				return;
 			break;
 			break;
 		}
 		}
-		DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
-		        "has over heated. Restart the computer. If the problem "
-		        "persists, power off the system and replace the "
-		        "adapter\n");
+		e_crit("Network adapter has been stopped because it "
+		       "has over heated. Restart the computer. If the problem "
+		       "persists, power off the system and replace the "
+		       "adapter\n");
 		/* write to clear the interrupt */
 		/* write to clear the interrupt */
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
 	}
 	}
@@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
 
 
 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
-		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
+		e_crit("Fan has stopped, replace the adapter\n");
 		/* write to clear the interrupt */
 		/* write to clear the interrupt */
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 	}
 	}
@@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 		                  handler, 0, adapter->name[vector],
 		                  handler, 0, adapter->name[vector],
 		                  adapter->q_vector[vector]);
 		                  adapter->q_vector[vector]);
 		if (err) {
 		if (err) {
-			DPRINTK(PROBE, ERR,
-			        "request_irq failed for MSIX interrupt "
-			        "Error: %d\n", err);
+			e_err("request_irq failed for MSIX interrupt: "
+			      "Error: %d\n", err);
 			goto free_queue_irqs;
 			goto free_queue_irqs;
 		}
 		}
 	}
 	}
@@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	err = request_irq(adapter->msix_entries[vector].vector,
 	err = request_irq(adapter->msix_entries[vector].vector,
 	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"request_irq for msix_lsc failed: %d\n", err);
+		e_err("request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
 		goto free_queue_irqs;
 	}
 	}
 
 
@@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 	}
 	}
 
 
 	if (err)
 	if (err)
-		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
+		e_err("request_irq failed, Error %d\n", err);
 
 
 	return err;
 	return err;
 }
 }
@@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	map_vector_to_rxq(adapter, 0, 0);
 	map_vector_to_rxq(adapter, 0, 0);
 	map_vector_to_txq(adapter, 0, 0);
 	map_vector_to_txq(adapter, 0, 0);
 
 
-	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
+	e_info("Legacy interrupt IVAR setup done\n");
 }
 }
 
 
 /**
 /**
@@ -3257,8 +3254,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 			msleep(1);
 			msleep(1);
 	}
 	}
 	if (k >= IXGBE_MAX_RX_DESC_POLL) {
 	if (k >= IXGBE_MAX_RX_DESC_POLL) {
-		DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
-		        "not set within the polling period\n", rxr);
+		e_err("RXDCTL.ENABLE on Rx queue %d not set within "
+		      "the polling period\n", rxr);
 	}
 	}
 	ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
 	ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
 	                      (adapter->rx_ring[rxr]->count - 1));
 	                      (adapter->rx_ring[rxr]->count - 1));
@@ -3387,8 +3384,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 			} while (--wait_loop &&
 			} while (--wait_loop &&
 			         !(txdctl & IXGBE_TXDCTL_ENABLE));
 			         !(txdctl & IXGBE_TXDCTL_ENABLE));
 			if (!wait_loop)
 			if (!wait_loop)
-				DPRINTK(DRV, ERR, "Could not enable "
-				        "Tx Queue %d\n", j);
+				e_err("Could not enable Tx Queue %d\n", j);
 		}
 		}
 	}
 	}
 
 
@@ -3436,8 +3432,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		if (esdp & IXGBE_ESDP_SDP1)
 		if (esdp & IXGBE_ESDP_SDP1)
-			DPRINTK(DRV, CRIT,
-				"Fan has stopped, replace the adapter\n");
+			e_crit("Fan has stopped, replace the adapter\n");
 	}
 	}
 
 
 	/*
 	/*
@@ -3466,7 +3461,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	} else {
 	} else {
 		err = ixgbe_non_sfp_link_config(hw);
 		err = ixgbe_non_sfp_link_config(hw);
 		if (err)
 		if (err)
-			DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
+			e_err("link_config FAILED %d\n", err);
 	}
 	}
 
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 	for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3527,19 +3522,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 		break;
 		break;
 	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
 	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
-		dev_err(&adapter->pdev->dev, "master disable timed out\n");
+		e_dev_err("master disable timed out\n");
 		break;
 		break;
 	case IXGBE_ERR_EEPROM_VERSION:
 	case IXGBE_ERR_EEPROM_VERSION:
 		/* We are running on a pre-production device, log a warning */
 		/* We are running on a pre-production device, log a warning */
-		dev_warn(&adapter->pdev->dev, "This device is a pre-production "
-		         "adapter/LOM.  Please be aware there may be issues "
-		         "associated with your hardware.  If you are "
-		         "experiencing problems please contact your Intel or "
-		         "hardware representative who provided you with this "
-		         "hardware.\n");
+		e_dev_warn("This device is a pre-production adapter/LOM. "
+			   "Please be aware there may be issuesassociated with "
+			   "your hardware.  If you are experiencing problems "
+			   "please contact your Intel or hardware "
+			   "representative who provided you with this "
+			   "hardware.\n");
 		break;
 		break;
 	default:
 	default:
-		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+		e_dev_err("Hardware Error: %d\n", err);
 	}
 	}
 
 
 	/* reprogram the RAR[0] in case user changed it. */
 	/* reprogram the RAR[0] in case user changed it. */
@@ -3920,12 +3915,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 		adapter->num_tx_queues = 1;
 		adapter->num_tx_queues = 1;
 #ifdef CONFIG_IXGBE_DCB
 #ifdef CONFIG_IXGBE_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
+			e_info("FCoE enabled with DCB\n");
 			ixgbe_set_dcb_queues(adapter);
 			ixgbe_set_dcb_queues(adapter);
 		}
 		}
 #endif
 #endif
 		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-			DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
+			e_info("FCoE enabled with RSS\n");
 			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 				ixgbe_set_fdir_queues(adapter);
 				ixgbe_set_fdir_queues(adapter);
@@ -4038,7 +4033,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 		 * This just means we'll go with either a single MSI
 		 * This just means we'll go with either a single MSI
 		 * vector or fall back to legacy interrupts.
 		 * vector or fall back to legacy interrupts.
 		 */
 		 */
-		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+			     "Unable to allocate MSI-X interrupts\n");
 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 		kfree(adapter->msix_entries);
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
 		adapter->msix_entries = NULL;
@@ -4435,8 +4431,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	if (!err) {
 	if (!err) {
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 	} else {
 	} else {
-		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
-		        "falling back to legacy.  Error: %d\n", err);
+		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+			     "Unable to allocate MSI interrupt, "
+			     "falling back to legacy.  Error: %d\n", err);
 		/* reset err */
 		/* reset err */
 		err = 0;
 		err = 0;
 	}
 	}
@@ -4557,27 +4554,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
 
 	err = ixgbe_set_interrupt_capability(adapter);
 	err = ixgbe_set_interrupt_capability(adapter);
 	if (err) {
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+		e_dev_err("Unable to setup interrupt capabilities\n");
 		goto err_set_interrupt;
 		goto err_set_interrupt;
 	}
 	}
 
 
 	err = ixgbe_alloc_q_vectors(adapter);
 	err = ixgbe_alloc_q_vectors(adapter);
 	if (err) {
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
-		        "vectors\n");
+		e_dev_err("Unable to allocate memory for queue vectors\n");
 		goto err_alloc_q_vectors;
 		goto err_alloc_q_vectors;
 	}
 	}
 
 
 	err = ixgbe_alloc_queues(adapter);
 	err = ixgbe_alloc_queues(adapter);
 	if (err) {
 	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		e_dev_err("Unable to allocate memory for queues\n");
 		goto err_alloc_queues;
 		goto err_alloc_queues;
 	}
 	}
 
 
-	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
-	        "Tx Queue count = %u\n",
-	        (adapter->num_rx_queues > 1) ? "Enabled" :
-	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+	       adapter->num_rx_queues, adapter->num_tx_queues);
 
 
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 
@@ -4648,15 +4643,13 @@ static void ixgbe_sfp_task(struct work_struct *work)
 			goto reschedule;
 			goto reschedule;
 		ret = hw->phy.ops.reset(hw);
 		ret = hw->phy.ops.reset(hw);
 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-			dev_err(&adapter->pdev->dev, "failed to initialize "
-				"because an unsupported SFP+ module type "
-				"was detected.\n"
-				"Reload the driver after installing a "
-				"supported module.\n");
+			e_dev_err("failed to initialize because an unsupported "
+				  "SFP+ module type was detected.\n");
+			e_dev_err("Reload the driver after installing a "
+				  "supported module.\n");
 			unregister_netdev(adapter->netdev);
 			unregister_netdev(adapter->netdev);
 		} else {
 		} else {
-			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
-			        hw->phy.sfp_type);
+			e_info("detected SFP+: %d\n", hw->phy.sfp_type);
 		}
 		}
 		/* don't need this routine any more */
 		/* don't need this routine any more */
 		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
@@ -4783,7 +4776,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 
 	/* initialize eeprom parameters */
 	/* initialize eeprom parameters */
 	if (ixgbe_init_eeprom_params_generic(hw)) {
 	if (ixgbe_init_eeprom_params_generic(hw)) {
-		dev_err(&pdev->dev, "EEPROM initialization failed\n");
+		e_dev_err("EEPROM initialization failed\n");
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
@@ -4836,8 +4829,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 err:
 err:
 	vfree(tx_ring->tx_buffer_info);
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 	tx_ring->tx_buffer_info = NULL;
-	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
-	                    "descriptor ring\n");
+	e_err("Unable to allocate memory for the Tx descriptor ring\n");
 	return -ENOMEM;
 	return -ENOMEM;
 }
 }
 
 
@@ -4859,7 +4851,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		if (!err)
 		if (!err)
 			continue;
 			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+		e_err("Allocation for Tx Queue %u failed\n", i);
 		break;
 		break;
 	}
 	}
 
 
@@ -4884,8 +4876,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	if (!rx_ring->rx_buffer_info)
 	if (!rx_ring->rx_buffer_info)
 		rx_ring->rx_buffer_info = vmalloc(size);
 		rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info) {
 	if (!rx_ring->rx_buffer_info) {
-		DPRINTK(PROBE, ERR,
-		        "vmalloc allocation failed for the rx desc ring\n");
+		e_err("vmalloc allocation failed for the Rx desc ring\n");
 		goto alloc_failed;
 		goto alloc_failed;
 	}
 	}
 	memset(rx_ring->rx_buffer_info, 0, size);
 	memset(rx_ring->rx_buffer_info, 0, size);
@@ -4898,8 +4889,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 					   &rx_ring->dma, GFP_KERNEL);
 					   &rx_ring->dma, GFP_KERNEL);
 
 
 	if (!rx_ring->desc) {
 	if (!rx_ring->desc) {
-		DPRINTK(PROBE, ERR,
-		        "Memory allocation failed for the rx desc ring\n");
+		e_err("Memory allocation failed for the Rx desc ring\n");
 		vfree(rx_ring->rx_buffer_info);
 		vfree(rx_ring->rx_buffer_info);
 		goto alloc_failed;
 		goto alloc_failed;
 	}
 	}
@@ -4932,7 +4922,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		if (!err)
 		if (!err)
 			continue;
 			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+		e_err("Allocation for Rx Queue %u failed\n", i);
 		break;
 		break;
 	}
 	}
 
 
@@ -5031,8 +5021,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 		return -EINVAL;
 		return -EINVAL;
 
 
-	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
-	        netdev->mtu, new_mtu);
+	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 	/* must set new MTU before calling down or up */
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 	netdev->mtu = new_mtu;
 
 
@@ -5145,8 +5134,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 
 	err = pci_enable_device_mem(pdev);
 	err = pci_enable_device_mem(pdev);
 	if (err) {
 	if (err) {
-		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
-				"suspend\n");
+		e_dev_err("Cannot enable PCI device from suspend\n");
 		return err;
 		return err;
 	}
 	}
 	pci_set_master(pdev);
 	pci_set_master(pdev);
@@ -5155,8 +5143,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 
 	err = ixgbe_init_interrupt_scheme(adapter);
 	err = ixgbe_init_interrupt_scheme(adapter);
 	if (err) {
 	if (err) {
-		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
-		                "device\n");
+		e_dev_err("Cannot initialize interrupts for device\n");
 		return err;
 		return err;
 	}
 	}
 
 
@@ -5282,6 +5269,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 	u64 non_eop_descs = 0, restart_queue = 0;
 	u64 non_eop_descs = 0, restart_queue = 0;
 
 
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return;
+
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		u64 rsc_count = 0;
 		u64 rsc_count = 0;
 		u64 rsc_flush = 0;
 		u64 rsc_flush = 0;
@@ -5512,10 +5503,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 	err = hw->phy.ops.identify_sfp(hw);
 	err = hw->phy.ops.identify_sfp(hw);
 
 
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		dev_err(&adapter->pdev->dev, "failed to initialize because "
-			"an unsupported SFP+ module type was detected.\n"
-			"Reload the driver after installing a supported "
-			"module.\n");
+		e_dev_err("failed to initialize because an unsupported SFP+ "
+			  "module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported "
+			  "module.\n");
 		unregister_netdev(adapter->netdev);
 		unregister_netdev(adapter->netdev);
 		return;
 		return;
 	}
 	}
@@ -5544,8 +5535,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 			set_bit(__IXGBE_FDIR_INIT_DONE,
 			set_bit(__IXGBE_FDIR_INIT_DONE,
 			        &(adapter->tx_ring[i]->reinit_state));
 			        &(adapter->tx_ring[i]->reinit_state));
 	} else {
 	} else {
-		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
-			"ignored adding FDIR ATR filters\n");
+		e_err("failed to finish FDIR re-initialization, "
+		      "ignored adding FDIR ATR filters\n");
 	}
 	}
 	/* Done FDIR Re-initialization, enable transmits */
 	/* Done FDIR Re-initialization, enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
 	netif_tx_start_all_queues(adapter->netdev);
@@ -5616,16 +5607,14 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 			}
 			}
 
 
-			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
-			       "Flow Control: %s\n",
-			       netdev->name,
+			e_info("NIC Link is Up %s, Flow Control: %s\n",
 			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
 			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-			        "10 Gbps" :
-			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-			         "1 Gbps" : "unknown speed")),
+			       "10 Gbps" :
+			       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+			       "1 Gbps" : "unknown speed")),
 			       ((flow_rx && flow_tx) ? "RX/TX" :
 			       ((flow_rx && flow_tx) ? "RX/TX" :
-			        (flow_rx ? "RX" :
-			        (flow_tx ? "TX" : "None"))));
+			       (flow_rx ? "RX" :
+			       (flow_tx ? "TX" : "None"))));
 
 
 			netif_carrier_on(netdev);
 			netif_carrier_on(netdev);
 		} else {
 		} else {
@@ -5636,8 +5625,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 		adapter->link_up = false;
 		adapter->link_up = false;
 		adapter->link_speed = 0;
 		adapter->link_speed = 0;
 		if (netif_carrier_ok(netdev)) {
 		if (netif_carrier_ok(netdev)) {
-			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
-			       netdev->name);
+			e_info("NIC Link is Down\n");
 			netif_carrier_off(netdev);
 			netif_carrier_off(netdev);
 		}
 		}
 	}
 	}
@@ -5813,9 +5801,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 				break;
 				break;
 			default:
 			default:
 				if (unlikely(net_ratelimit())) {
 				if (unlikely(net_ratelimit())) {
-					DPRINTK(PROBE, WARNING,
-					 "partial checksum but proto=%x!\n",
-					 skb->protocol);
+					e_warn("partial checksum but "
+					       "proto=%x!\n", skb->protocol);
 				}
 				}
 				break;
 				break;
 			}
 			}
@@ -5926,7 +5913,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	return count;
 	return count;
 
 
 dma_error:
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	e_dev_err("TX DMA map failed\n");
 
 
 	/* clear timestamp and dma mappings for failed tx_buffer_info map */
 	/* clear timestamp and dma mappings for failed tx_buffer_info map */
 	tx_buffer_info->dma = 0;
 	tx_buffer_info->dma = 0;
@@ -6423,8 +6410,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 	if (err) {
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"Failed to enable PCI sriov: %d\n", err);
+		e_err("Failed to enable PCI sriov: %d\n", err);
 		goto err_novfs;
 		goto err_novfs;
 	}
 	}
 	/* If call to enable VFs succeeded then allocate memory
 	/* If call to enable VFs succeeded then allocate memory
@@ -6448,9 +6434,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 	}
 	}
 
 
 	/* Oh oh */
 	/* Oh oh */
-	DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for VF "
-		"Data Storage - SRIOV disabled\n");
+	e_err("Unable to allocate memory for VF Data Storage - SRIOV "
+	      "disabled\n");
 	pci_disable_sriov(adapter->pdev);
 	pci_disable_sriov(adapter->pdev);
 
 
 err_novfs:
 err_novfs:
@@ -6498,8 +6483,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 			err = dma_set_coherent_mask(&pdev->dev,
 			err = dma_set_coherent_mask(&pdev->dev,
 						    DMA_BIT_MASK(32));
 						    DMA_BIT_MASK(32));
 			if (err) {
 			if (err) {
-				dev_err(&pdev->dev, "No usable DMA "
-				        "configuration, aborting\n");
+				e_dev_err("No usable DMA configuration, "
+					  "aborting\n");
 				goto err_dma;
 				goto err_dma;
 			}
 			}
 		}
 		}
@@ -6509,8 +6494,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 	                                   IORESOURCE_MEM), ixgbe_driver_name);
 	                                   IORESOURCE_MEM), ixgbe_driver_name);
 	if (err) {
 	if (err) {
-		dev_err(&pdev->dev,
-		        "pci_request_selected_regions failed 0x%x\n", err);
+		e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
 		goto err_pci_reg;
 		goto err_pci_reg;
 	}
 	}
 
 
@@ -6621,8 +6605,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
 		if (esdp & IXGBE_ESDP_SDP1)
 		if (esdp & IXGBE_ESDP_SDP1)
-			DPRINTK(PROBE, CRIT,
-				"Fan has stopped, replace the adapter\n");
+			e_crit("Fan has stopped, replace the adapter\n");
 	}
 	}
 
 
 	/* reset_hw fills in the perm_addr as well */
 	/* reset_hw fills in the perm_addr as well */
@@ -6641,19 +6624,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 			  round_jiffies(jiffies + (2 * HZ)));
 			  round_jiffies(jiffies + (2 * HZ)));
 		err = 0;
 		err = 0;
 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		dev_err(&adapter->pdev->dev, "failed to initialize because "
-			"an unsupported SFP+ module type was detected.\n"
-			"Reload the driver after installing a supported "
-			"module.\n");
+		e_dev_err("failed to initialize because an unsupported SFP+ "
+			  "module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported "
+			  "module.\n");
 		goto err_sw_init;
 		goto err_sw_init;
 	} else if (err) {
 	} else if (err) {
-		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+		e_dev_err("HW Init failed: %d\n", err);
 		goto err_sw_init;
 		goto err_sw_init;
 	}
 	}
 
 
 	ixgbe_probe_vf(adapter, ii);
 	ixgbe_probe_vf(adapter, ii);
 
 
-	netdev->features = NETIF_F_SG |
+	netdev->features =    NETIF_F_SG |
 	                   NETIF_F_IP_CSUM |
 	                   NETIF_F_IP_CSUM |
 	                   NETIF_F_HW_VLAN_TX |
 	                   NETIF_F_HW_VLAN_TX |
 	                   NETIF_F_HW_VLAN_RX |
 	                   NETIF_F_HW_VLAN_RX |
@@ -6700,7 +6683,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 
 	/* make sure the EEPROM is good */
 	/* make sure the EEPROM is good */
 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
-		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+		e_dev_err("The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		err = -EIO;
 		goto err_eeprom;
 		goto err_eeprom;
 	}
 	}
@@ -6709,7 +6692,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
 
 
 	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
 	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
-		dev_err(&pdev->dev, "invalid MAC address\n");
+		e_dev_err("invalid MAC address\n");
 		err = -EIO;
 		err = -EIO;
 		goto err_eeprom;
 		goto err_eeprom;
 	}
 	}
@@ -6744,7 +6727,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	hw->mac.ops.get_bus_info(hw);
 	hw->mac.ops.get_bus_info(hw);
 
 
 	/* print bus type/speed/width info */
 	/* print bus type/speed/width info */
-	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
+	e_dev_info("(PCI Express:%s:%s) %pM\n",
 	        ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
 	        ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
 	         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
 	         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
 	        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
 	        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
@@ -6754,20 +6737,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	        netdev->dev_addr);
 	        netdev->dev_addr);
 	ixgbe_read_pba_num_generic(hw, &part_num);
 	ixgbe_read_pba_num_generic(hw, &part_num);
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
-		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-		         (part_num >> 8), (part_num & 0xff));
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
+			   "PBA No: %06x-%03x\n",
+			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+			   (part_num >> 8), (part_num & 0xff));
 	else
 	else
-		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-		         hw->mac.type, hw->phy.type,
-		         (part_num >> 8), (part_num & 0xff));
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+			   hw->mac.type, hw->phy.type,
+			   (part_num >> 8), (part_num & 0xff));
 
 
 	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
 	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
-		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
-		         "this card is not sufficient for optimal "
-		         "performance.\n");
-		dev_warn(&pdev->dev, "For optimal performance a x8 "
-		         "PCI-Express slot is required.\n");
+		e_dev_warn("PCI-Express bandwidth available for this card is "
+			   "not sufficient for optimal performance.\n");
+		e_dev_warn("For optimal performance a x8 PCI-Express slot "
+			   "is required.\n");
 	}
 	}
 
 
 	/* save off EEPROM version number */
 	/* save off EEPROM version number */
@@ -6778,12 +6761,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
 		/* We are running on a pre-production device, log a warning */
 		/* We are running on a pre-production device, log a warning */
-		dev_warn(&pdev->dev, "This device is a pre-production "
-		         "adapter/LOM.  Please be aware there may be issues "
-		         "associated with your hardware.  If you are "
-		         "experiencing problems please contact your Intel or "
-		         "hardware representative who provided you with this "
-		         "hardware.\n");
+		e_dev_warn("This device is a pre-production adapter/LOM. "
+			   "Please be aware there may be issues associated "
+			   "with your hardware.  If you are experiencing "
+			   "problems please contact your Intel or hardware "
+			   "representative who provided you with this "
+			   "hardware.\n");
 	}
 	}
 	strcpy(netdev->name, "eth%d");
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	err = register_netdev(netdev);
@@ -6806,8 +6789,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	}
 	}
 #endif
 #endif
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
-			adapter->num_vfs);
+		e_info("IOV is enabled with %d VFs\n", adapter->num_vfs);
 		for (i = 0; i < adapter->num_vfs; i++)
 		for (i = 0; i < adapter->num_vfs; i++)
 			ixgbe_vf_configuration(pdev, (i | 0x10000000));
 			ixgbe_vf_configuration(pdev, (i | 0x10000000));
 	}
 	}
@@ -6815,7 +6797,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	/* add san mac addr to netdev */
 	/* add san mac addr to netdev */
 	ixgbe_add_sanmac_netdev(netdev);
 	ixgbe_add_sanmac_netdev(netdev);
 
 
-	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
+	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
 	cards_found++;
 	cards_found++;
 	return 0;
 	return 0;
 
 
@@ -6905,7 +6887,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 	                             IORESOURCE_MEM));
 	                             IORESOURCE_MEM));
 
 
-	DPRINTK(PROBE, INFO, "complete\n");
+	e_dev_info("complete\n");
 
 
 	free_netdev(netdev);
 	free_netdev(netdev);
 
 
@@ -6955,8 +6937,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 	int err;
 	int err;
 
 
 	if (pci_enable_device_mem(pdev)) {
 	if (pci_enable_device_mem(pdev)) {
-		DPRINTK(PROBE, ERR,
-		        "Cannot re-enable PCI device after reset.\n");
+		e_err("Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 	} else {
 		pci_set_master(pdev);
 		pci_set_master(pdev);
@@ -6972,8 +6953,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
 	if (err) {
 	if (err) {
-		dev_err(&pdev->dev,
-		  "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
+		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+			  "failed 0x%0x\n", err);
 		/* non-fatal, continue */
 		/* non-fatal, continue */
 	}
 	}
 
 
@@ -6994,7 +6975,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
 
 
 	if (netif_running(netdev)) {
 	if (netif_running(netdev)) {
 		if (ixgbe_up(adapter)) {
 		if (ixgbe_up(adapter)) {
-			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
+			e_info("ixgbe_up failed after reset\n");
 			return;
 			return;
 		}
 		}
 	}
 	}
@@ -7030,10 +7011,9 @@ static struct pci_driver ixgbe_driver = {
 static int __init ixgbe_init_module(void)
 static int __init ixgbe_init_module(void)
 {
 {
 	int ret;
 	int ret;
-	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
-	       ixgbe_driver_string, ixgbe_driver_version);
-
-	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+	pr_info("%s - version %s\n", ixgbe_driver_string,
+		   ixgbe_driver_version);
+	pr_info("%s\n", ixgbe_copyright);
 
 
 #ifdef CONFIG_IXGBE_DCA
 #ifdef CONFIG_IXGBE_DCA
 	dca_register_notify(&dca_notifier);
 	dca_register_notify(&dca_notifier);
@@ -7072,18 +7052,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 }

 #endif /* CONFIG_IXGBE_DCA */
-#ifdef DEBUG
+
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbe_get_hw_dev return device
  * used by hardware layer to print debugging information
  **/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	return adapter->netdev->name;
+	return adapter->netdev;
 }

-#endif
 module_exit(ixgbe_exit_module);

 /* ixgbe_main.c */

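The hunks above replace DPRINTK()/printk()/dev_info() calls with e_info()/e_err()/e_dev_info()/pr_info(). A minimal sketch of what such driver-local logging wrappers typically look like is shown below; the macro bodies are illustrative assumptions, not the exact ixgbe definitions.

/* Hedged sketch: assumed wrapper-macro shapes for the logging conversion above. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes; prefixes pr_info()/pr_err() */

#define e_dev_info(format, arg...) \
	dev_info(&adapter->pdev->dev, format, ## arg)	/* assumes an 'adapter' variable in scope */
#define e_dev_err(format, arg...) \
	dev_err(&adapter->pdev->dev, format, ## arg)
#define e_info(format, arg...) \
	netdev_info(adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
	netdev_err(adapter->netdev, format, ## arg)

The benefit over the removed DPRINTK() is that every message is automatically tagged with the device (or module) name without each call site repeating it.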
+ 5 - 10
drivers/net/ixgbe/ixgbe_sriov.c

@@ -25,7 +25,6 @@

 *******************************************************************************/

-
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -174,7 +173,7 @@ int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 	adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
 	                                              vf, IXGBE_RAH_AV);
 	if (adapter->vfinfo[vf].rar < 0) {
-		DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+		e_err("Could not set MAC Filter for VF %d\n", vf);
 		return -1;
 	}

@@ -194,11 +193,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)

 	if (enable) {
 		random_ether_addr(vf_mac_addr);
-		DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
-		       "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
-		       vfn,
-		       vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
-		       vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+		e_info("IOV: VF %d is enabled MAC %pM\n", vfn, vf_mac_addr);
 		/*
 		 * Store away the VF "permananet" MAC address, it will ask
 		 * for it later.
@@ -243,7 +238,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

 	if (retval)
-		printk(KERN_ERR "Error receiving message from VF\n");
+		pr_err("Error receiving message from VF\n");

 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -257,7 +252,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	if (msgbuf[0] == IXGBE_VF_RESET) {
 		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
 		u8 *addr = (u8 *)(&msgbuf[1]);
-		DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+		e_info("VF Reset msg received from vf %d\n", vf);
 		adapter->vfinfo[vf].clear_to_send = false;
 		ixgbe_vf_reset_msg(adapter, vf);
 		adapter->vfinfo[vf].clear_to_send = true;
@@ -310,7 +305,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 		retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
 		break;
 	default:
-		DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+		e_err("Unhandled Msg %8.8x\n", msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
 		break;
 	}

+ 1 - 0
drivers/net/ixgbe/ixgbe_type.h

@@ -2609,6 +2609,7 @@ struct ixgbe_info {
 #define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
+#define IXGBE_ERR_RAR_INDEX                     -27
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF

 #endif /* _IXGBE_TYPE_H_ */

+ 1 - 0
drivers/net/ixgbevf/ixgbevf_main.c

@@ -3411,6 +3411,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
+	netdev->features |= NETIF_F_GRO;
 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_TSO6;
 	netdev->vlan_features |= NETIF_F_IP_CSUM;

+ 15 - 17
drivers/net/korina.c

@@ -135,6 +135,7 @@ struct korina_private {
 	struct napi_struct napi;
 	struct timer_list media_check_timer;
 	struct mii_if_info mii_if;
+	struct work_struct restart_task;
 	struct net_device *dev;
 	int phy_addr;
 };
@@ -375,7 +376,7 @@ static int korina_rx(struct net_device *dev, int limit)
 		if (devcs & ETH_RX_LE)
 			dev->stats.rx_length_errors++;
 		if (devcs & ETH_RX_OVR)
-			dev->stats.rx_over_errors++;
+			dev->stats.rx_fifo_errors++;
 		if (devcs & ETH_RX_CV)
 			dev->stats.rx_frame_errors++;
 		if (devcs & ETH_RX_CES)
@@ -764,10 +765,9 @@ static int korina_alloc_ring(struct net_device *dev)

 	/* Initialize the receive descriptors */
 	for (i = 0; i < KORINA_NUM_RDS; i++) {
-		skb = dev_alloc_skb(KORINA_RBSIZE + 2);
+		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 		if (!skb)
 			return -ENOMEM;
-		skb_reserve(skb, 2);
 		lp->rx_skb[i] = skb;
 		lp->rd_ring[i].control = DMA_DESC_IOD |
 				DMA_COUNT(KORINA_RBSIZE);
@@ -890,12 +890,12 @@ static int korina_init(struct net_device *dev)

 /*
  * Restart the RC32434 ethernet controller.
- * FIXME: check the return status where we call it
  */
-static int korina_restart(struct net_device *dev)
+static void korina_restart_task(struct work_struct *work)
 {
-	struct korina_private *lp = netdev_priv(dev);
-	int ret;
+	struct korina_private *lp = container_of(work,
+			struct korina_private, restart_task);
+	struct net_device *dev = lp->dev;

 	/*
 	 * Disable interrupts
@@ -916,10 +916,9 @@ static int korina_restart(struct net_device *dev)

 	napi_disable(&lp->napi);

-	ret = korina_init(dev);
-	if (ret < 0) {
+	if (korina_init(dev) < 0) {
 		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
-		return ret;
+		return;
 	}
 	korina_multicast_list(dev);

@@ -927,8 +926,6 @@ static int korina_restart(struct net_device *dev)
 	enable_irq(lp->ovr_irq);
 	enable_irq(lp->tx_irq);
 	enable_irq(lp->rx_irq);
-
-	return ret;
 }

 static void korina_clear_and_restart(struct net_device *dev, u32 value)
@@ -937,7 +934,7 @@ static void korina_clear_and_restart(struct net_device *dev, u32 value)

 	netif_stop_queue(dev);
 	writel(value, &lp->eth_regs->ethintfc);
-	korina_restart(dev);
+	schedule_work(&lp->restart_task);
 }

 /* Ethernet Tx Underflow interrupt */
@@ -962,11 +959,8 @@ static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
 static void korina_tx_timeout(struct net_device *dev)
 {
 	struct korina_private *lp = netdev_priv(dev);
-	unsigned long flags;

-	spin_lock_irqsave(&lp->lock, flags);
-	korina_restart(dev);
-	spin_unlock_irqrestore(&lp->lock, flags);
+	schedule_work(&lp->restart_task);
 }

 /* Ethernet Rx Overflow interrupt */
@@ -1086,6 +1080,8 @@ static int korina_close(struct net_device *dev)

 	napi_disable(&lp->napi);

+	cancel_work_sync(&lp->restart_task);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);
@@ -1198,6 +1194,8 @@ static int korina_probe(struct platform_device *pdev)
 	}
 	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

+	INIT_WORK(&lp->restart_task, korina_restart_task);
+
 	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
 			dev->name);
 out:

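The korina hunks above move the controller restart out of interrupt/timeout context into a work item, which runs in process context and can safely sleep while re-initialising the hardware. A minimal sketch of that pattern, using a hypothetical driver (foo_priv/foo_reinit_hw are illustrative names, not korina symbols):

/* Hedged sketch of the deferred-restart idiom used above. */
struct foo_priv {
	struct net_device *dev;
	struct work_struct restart_task;
};

static void foo_restart_task(struct work_struct *work)
{
	struct foo_priv *lp = container_of(work, struct foo_priv, restart_task);

	/* process context: may sleep, allocate memory, re-program the NIC */
	foo_reinit_hw(lp->dev);		/* hypothetical re-init helper */
}

/* probe:        INIT_WORK(&lp->restart_task, foo_restart_task);
 * irq/timeout:  schedule_work(&lp->restart_task);
 * close/remove: cancel_work_sync(&lp->restart_task);  -- prevents the work
 *               running after the device has been torn down */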
+ 2 - 1
drivers/net/ksz884x.c

@@ -4854,7 +4854,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
  *
  * Return 0 if successful; otherwise an error code indicating failure.
  */
-static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dev_priv *priv = netdev_priv(dev);
 	struct dev_info *hw_priv = priv->adapter;
@@ -6863,6 +6863,7 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_tx_timeout		= netdev_tx_timeout,
 	.ndo_change_mtu		= netdev_change_mtu,
 	.ndo_set_mac_address	= netdev_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= netdev_ioctl,
 	.ndo_set_rx_mode	= netdev_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER

+ 51 - 10
drivers/net/loopback.c

@@ -60,11 +60,51 @@
 #include <net/net_namespace.h>

 struct pcpu_lstats {
-	unsigned long packets;
-	unsigned long bytes;
+	u64 packets;
+	u64 bytes;
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	seqcount_t seq;
+#endif
 	unsigned long drops;
 };

+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+static void inline lstats_update_begin(struct pcpu_lstats *lstats)
+{
+	write_seqcount_begin(&lstats->seq);
+}
+static void inline lstats_update_end(struct pcpu_lstats *lstats)
+{
+	write_seqcount_end(&lstats->seq);
+}
+static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+	u64 tpackets, tbytes;
+	unsigned int seq;
+
+	do {
+		seq = read_seqcount_begin(&lstats->seq);
+		tpackets = lstats->packets;
+		tbytes = lstats->bytes;
+	} while (read_seqcount_retry(&lstats->seq, seq));
+
+	*packets += tpackets;
+	*bytes += tbytes;
+}
+#else
+static void inline lstats_update_begin(struct pcpu_lstats *lstats)
+{
+}
+static void inline lstats_update_end(struct pcpu_lstats *lstats)
+{
+}
+static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+	*packets += lstats->packets;
+	*bytes += lstats->bytes;
+}
+#endif
+
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -86,21 +126,23 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,

 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+		lstats_update_begin(lb_stats);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
+		lstats_update_end(lb_stats);
 	} else
 		lb_stats->drops++;

 	return NETDEV_TX_OK;
 }

-static struct net_device_stats *loopback_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
 {
 	const struct pcpu_lstats __percpu *pcpu_lstats;
-	struct net_device_stats *stats = &dev->stats;
-	unsigned long bytes = 0;
-	unsigned long packets = 0;
-	unsigned long drops = 0;
+	struct rtnl_link_stats64 *stats = &dev->stats64;
+	u64 bytes = 0;
+	u64 packets = 0;
+	u64 drops = 0;
 	int i;

 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
@@ -108,8 +150,7 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
 		const struct pcpu_lstats *lb_stats;

 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		bytes   += lb_stats->bytes;
-		packets += lb_stats->packets;
+		lstats_fetch_and_add(&packets, &bytes, lb_stats);
 		drops   += lb_stats->drops;
 	}
 	stats->rx_packets = packets;
@@ -158,7 +199,7 @@ static void loopback_dev_free(struct net_device *dev)
 static const struct net_device_ops loopback_ops = {
 	.ndo_init      = loopback_dev_init,
 	.ndo_start_xmit= loopback_xmit,
-	.ndo_get_stats = loopback_get_stats,
+	.ndo_get_stats64 = loopback_get_stats64,
 };

 /*

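The reason for the seqcount in the loopback hunks above is that a 64-bit counter cannot be read atomically on a 32-bit SMP machine, so a reader could otherwise see a torn value while a writer updates it. A condensed view of the writer/reader pairing, mirroring the code introduced in the hunk:

	/* writer side (per-CPU, xmit path) */
	write_seqcount_begin(&lstats->seq);
	lstats->packets++;
	lstats->bytes += len;
	write_seqcount_end(&lstats->seq);

	/* reader side (stats aggregation) retries if it raced with a writer */
	do {
		seq = read_seqcount_begin(&lstats->seq);
		tpackets = lstats->packets;
		tbytes   = lstats->bytes;
	} while (read_seqcount_retry(&lstats->seq, seq));

On 64-bit kernels (or !SMP) the helpers compile away to plain loads and stores, which is why they are wrapped in the BITS_PER_LONG==32 && CONFIG_SMP conditional.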
+ 29 - 28
drivers/net/mac8390.c

@@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count,
 #define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))

+#define memcmp_withio(a, b, c)	memcmp((a), (void *)(b), (c))
+
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
 				   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count,
 				  struct sk_buff *skb, int ring_offset);
 static void slow_sane_block_output(struct net_device *dev, int count,
 				   const unsigned char *buf, int start_page);
-static void word_memcpy_tocard(void *tp, const void *fp, int count);
-static void word_memcpy_fromcard(void *tp, const void *fp, int count);
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);

 static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
 {
@@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
 	unsigned long outdata = 0xA5A0B5B0;
 	unsigned long indata =  0x00000000;
 	/* Try writing 32 bits */
-	memcpy(membase, &outdata, 4);
+	memcpy_toio(membase, &outdata, 4);
 	/* Now compare them */
-	if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
+	if (memcmp_withio(&outdata, membase, 4) == 0)
 		return ACCESS_32;
 	/* Write 16 bit output */
 	word_memcpy_tocard(membase, &outdata, 4);
@@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev,
 	case MAC8390_APPLE:
 		switch (mac8390_testio(dev->mem_start)) {
 		case ACCESS_UNKNOWN:
-			pr_info("Don't know how to access card memory!\n");
+			pr_err("Don't know how to access card memory!\n");
 			return -ENODEV;
 			break;

@@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev,

 static int mac8390_open(struct net_device *dev)
 {
+	int err;
+
 	__ei_open(dev);
-	if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
-		pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
-		return -EAGAIN;
-	}
-	return 0;
+	err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
+	if (err)
+		pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+	return err;
 }

 static int mac8390_close(struct net_device *dev)
@@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
 			      struct e8390_pkt_hdr *hdr, int ring_page)
 {
 	unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-	memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4);
+	memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
 	/* Fix endianness */
 	hdr->count = swab16(hdr->count);
 }
@@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count,
 	if (xfer_start + count > ei_status.rmem_end) {
 		/* We must wrap the input move. */
 		int semi_count = ei_status.rmem_end - xfer_start;
-		memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+		memcpy_fromio(skb->data, dev->mem_start + xfer_base,
 			      semi_count);
 		count -= semi_count;
-		memcpy_toio(skb->data + semi_count,
-			    (char *)ei_status.rmem_start, count);
-	} else {
-		memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+		memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
 			      count);
+	} else {
+		memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
 	}
 }

@@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count,
 {
 	long shmem = (start_page - WD_START_PG)<<8;

-	memcpy_toio((char *)dev->mem_start + shmem, buf, count);
+	memcpy_toio(dev->mem_start + shmem, buf, count);
 }

 /* dayna block input/output */
@@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev,
 				   int ring_page)
 {
 	unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-	word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
+	word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
 	/* Register endianism - fix here rather than 8390.c */
 	hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
 }
@@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count,
 	if (xfer_start + count > ei_status.rmem_end) {
 		/* We must wrap the input move. */
 		int semi_count = ei_status.rmem_end - xfer_start;
-		word_memcpy_fromcard(skb->data,
-				     (char *)dev->mem_start + xfer_base,
+		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
 				     semi_count);
 		count -= semi_count;
 		word_memcpy_fromcard(skb->data + semi_count,
-				     (char *)ei_status.rmem_start, count);
+				     ei_status.rmem_start, count);
 	} else {
-		word_memcpy_fromcard(skb->data,
-				     (char *)dev->mem_start + xfer_base, count);
+		word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
+				     count);
 	}
 }

@@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count,
 {
 	long shmem = (start_page - WD_START_PG)<<8;

-	word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count);
+	word_memcpy_tocard(dev->mem_start + shmem, buf, count);
 }

-static void word_memcpy_tocard(void *tp, const void *fp, int count)
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
 {
-	volatile unsigned short *to = tp;
+	volatile unsigned short *to = (void *)tp;
 	const unsigned short *from = fp;

 	count++;
@@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
 		*to++ = *from++;
 }

-static void word_memcpy_fromcard(void *tp, const void *fp, int count)
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
 {
 	unsigned short *to = tp;
-	const volatile unsigned short *from = fp;
+	const volatile unsigned short *from = (const void *)fp;

 	count++;
 	count /= 2;

+ 23 - 8
drivers/net/macvlan.c

@@ -37,6 +37,7 @@ struct macvlan_port {
 	struct net_device	*dev;
 	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
 	struct list_head	vlans;
+	struct rcu_head		rcu;
 };

 static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
@@ -145,15 +146,16 @@ static void macvlan_broadcast(struct sk_buff *skb,
 }

 /* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
-					    struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
 {
+	struct macvlan_port *port;
 	const struct ethhdr *eth = eth_hdr(skb);
 	const struct macvlan_dev *vlan;
 	const struct macvlan_dev *src;
 	struct net_device *dev;
 	unsigned int len;

+	port = rcu_dereference(skb->dev->macvlan_port);
 	if (is_multicast_ether_addr(eth->h_dest)) {
 		src = macvlan_hash_lookup(port, eth->h_source);
 		if (!src)
@@ -515,6 +517,7 @@ static int macvlan_port_create(struct net_device *dev)
 {
 	struct macvlan_port *port;
 	unsigned int i;
+	int err;

 	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
 		return -EINVAL;
@@ -528,16 +531,31 @@ static int macvlan_port_create(struct net_device *dev)
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
 		INIT_HLIST_HEAD(&port->vlan_hash[i]);
 	rcu_assign_pointer(dev->macvlan_port, port);
-	return 0;
+
+	err = netdev_rx_handler_register(dev, macvlan_handle_frame);
+	if (err) {
+		rcu_assign_pointer(dev->macvlan_port, NULL);
+		kfree(port);
+	}
+
+	return err;
+}
+
+static void macvlan_port_rcu_free(struct rcu_head *head)
+{
+	struct macvlan_port *port;
+
+	port = container_of(head, struct macvlan_port, rcu);
+	kfree(port);
 }

 static void macvlan_port_destroy(struct net_device *dev)
 {
 	struct macvlan_port *port = dev->macvlan_port;

+	netdev_rx_handler_unregister(dev);
 	rcu_assign_pointer(dev->macvlan_port, NULL);
-	synchronize_rcu();
-	kfree(port);
+	call_rcu(&port->rcu, macvlan_port_rcu_free);
 }

 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -767,14 +785,12 @@ static int __init macvlan_init_module(void)
 	int err;

 	register_netdevice_notifier(&macvlan_notifier_block);
-	macvlan_handle_frame_hook = macvlan_handle_frame;

 	err = macvlan_link_register(&macvlan_link_ops);
 	if (err < 0)
 		goto err1;
 	return 0;
 err1:
-	macvlan_handle_frame_hook = NULL;
 	unregister_netdevice_notifier(&macvlan_notifier_block);
 	return err;
 }
@@ -782,7 +798,6 @@ err1:
 static void __exit macvlan_cleanup_module(void)
 {
 	rtnl_link_unregister(&macvlan_link_ops);
-	macvlan_handle_frame_hook = NULL;
 	unregister_netdevice_notifier(&macvlan_notifier_block);
 }


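The macvlan conversion above drops the global macvlan_handle_frame_hook in favour of the per-device rx_handler, and defers freeing the port structure past an RCU grace period so that receivers still running the handler never touch freed memory. In outline, using only the calls that appear in the hunks:

	/* attach: the handler is found per device from netif_receive_skb */
	err = netdev_rx_handler_register(dev, macvlan_handle_frame);

	/* detach: unregister, clear the pointer, then let RCU do the kfree
	 * asynchronously instead of blocking in synchronize_rcu() */
	netdev_rx_handler_unregister(dev);
	rcu_assign_pointer(dev->macvlan_port, NULL);
	call_rcu(&port->rcu, macvlan_port_rcu_free);

The call_rcu() variant is preferred here because port teardown can happen under locks where sleeping in synchronize_rcu() would be undesirable.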
+ 7 - 7
drivers/net/mlx4/eq.c

@@ -110,7 +110,7 @@ struct mlx4_eqe {
 		u32		raw[6];
 		struct {
 			__be32	cqn;
-		} __attribute__((packed)) comp;
+		} __packed comp;
 		struct {
 			u16	reserved1;
 			__be16	token;
@@ -118,27 +118,27 @@ struct mlx4_eqe {
 			u8	reserved3[3];
 			u8	status;
 			__be64	out_param;
-		} __attribute__((packed)) cmd;
+		} __packed cmd;
 		struct {
 			__be32	qpn;
-		} __attribute__((packed)) qp;
+		} __packed qp;
 		struct {
 			__be32	srqn;
-		} __attribute__((packed)) srq;
+		} __packed srq;
 		struct {
 			__be32	cqn;
 			u32	reserved1;
 			u8	reserved2[3];
 			u8	syndrome;
-		} __attribute__((packed)) cq_err;
+		} __packed cq_err;
 		struct {
 			u32	reserved1[2];
 			__be32	port;
-		} __attribute__((packed)) port_change;
+		} __packed port_change;
 	}			event;
 	u8			reserved3[3];
 	u8			owner;
-} __attribute__((packed));
+} __packed;

 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {

+ 1 - 1
drivers/net/mlx4/mr.c

@@ -58,7 +58,7 @@ struct mlx4_mpt_entry {
 	__be32 mtt_sz;
 	__be32 entity_size;
 	__be32 first_byte_offset;
-} __attribute__((packed));
+} __packed;

 #define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)

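These mlx4 hunks are purely cosmetic: __packed is the kernel's shorthand for __attribute__((packed)), so the layout of the hardware structures is unchanged. The following stand-alone example (not from the patch) shows what packing does to a structure's size; the exact numbers depend on the ABI, so treat the printed values as typical rather than guaranteed.

	#include <stdio.h>

	struct unpacked { unsigned char a; unsigned int b; };	/* compiler may pad after 'a' */
	struct packed_s { unsigned char a; unsigned int b; } __attribute__((packed));

	int main(void)
	{
		/* typically prints "8 5": packing removes the padding */
		printf("%zu %zu\n", sizeof(struct unpacked), sizeof(struct packed_s));
		return 0;
	}

Packed layouts matter for structures like mlx4_eqe that must match a device- or firmware-defined wire format byte for byte.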
+ 50 - 1
drivers/net/phy/lxt.c

@@ -53,6 +53,9 @@

 #define MII_LXT971_ISR		19  /* Interrupt Status Register */

+/* register definitions for the 973 */
+#define MII_LXT973_PCR 16 /* Port Configuration Register */
+#define PCR_FIBER_SELECT 1

 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
 	return err;
 }

+static int lxt973_probe(struct phy_device *phydev)
+{
+	int val = phy_read(phydev, MII_LXT973_PCR);
+
+	if (val & PCR_FIBER_SELECT) {
+		/*
+		 * If fiber is selected, then the only correct setting
+		 * is 100Mbps, full duplex, and auto negotiation off.
+		 */
+		val = phy_read(phydev, MII_BMCR);
+		val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+		val &= ~BMCR_ANENABLE;
+		phy_write(phydev, MII_BMCR, val);
+		/* Remember that the port is in fiber mode. */
+		phydev->priv = lxt973_probe;
+	} else {
+		phydev->priv = NULL;
+	}
+	return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+	/* Do nothing if port is in fiber mode. */
+	return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
 	.phy_id		= 0x78100000,
 	.name		= "LXT970",
@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
 	.driver 	= { .owner = THIS_MODULE,},
 };

+static struct phy_driver lxt973_driver = {
+	.phy_id		= 0x00137a10,
+	.name		= "LXT973",
+	.phy_id_mask	= 0xfffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= 0,
+	.probe		= lxt973_probe,
+	.config_aneg	= lxt973_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
 	int ret;
@@ -157,9 +199,15 @@ static int __init lxt_init(void)
 	ret = phy_driver_register(&lxt971_driver);
 	if (ret)
 		goto err2;
+
+	ret = phy_driver_register(&lxt973_driver);
+	if (ret)
+		goto err3;
 	return 0;

- err2:	
+ err3:
+	phy_driver_unregister(&lxt971_driver);
+ err2:
 	phy_driver_unregister(&lxt970_driver);
  err1:
 	return ret;
@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
 {
 	phy_driver_unregister(&lxt970_driver);
 	phy_driver_unregister(&lxt971_driver);
+	phy_driver_unregister(&lxt973_driver);
 }

 module_init(lxt_init);

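The LXT973 support added above detects fiber selection once in lxt973_probe() and then stores a non-NULL sentinel in phydev->priv; lxt973_config_aneg() only tests that pointer to decide whether to skip genphy_config_aneg(). The same sentinel-pointer idiom in miniature, with hypothetical names (struct widget, is_fiber, widget_do_autoneg are illustrations, not kernel symbols):

	/* Hedged sketch of the idiom: any non-NULL value works as a boolean flag. */
	static int widget_probe(struct widget *w)
	{
		/* reuse the probe function's own address as the marker,
		 * as the hunk above does with lxt973_probe */
		w->priv = is_fiber(w) ? (void *)widget_probe : NULL;
		return 0;
	}

	static int widget_config(struct widget *w)
	{
		return w->priv ? 0 : widget_do_autoneg(w);	/* no-op in fiber mode */
	}

This avoids adding an extra flag field for a one-bit piece of state that is decided once at probe time.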
+ 4 - 4
drivers/net/ppp_generic.c

@@ -1416,7 +1416,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		flen = len;
 		if (nfree > 0) {
 			if (pch->speed == 0) {
-				flen = totlen/nfree;
+				flen = len/nfree;
 				if (nbigger > 0) {
 					flen++;
 					nbigger--;
@@ -1927,9 +1927,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 	/* If the queue is getting long, don't wait any longer for packets
 	   before the start of the queue. */
 	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
-		struct sk_buff *skb = skb_peek(&ppp->mrq);
-		if (seq_before(ppp->minseq, skb->sequence))
-			ppp->minseq = skb->sequence;
+		struct sk_buff *mskb = skb_peek(&ppp->mrq);
+		if (seq_before(ppp->minseq, mskb->sequence))
+			ppp->minseq = mskb->sequence;
 	}

 	/* Pull completed packets off the queue and receive them. */

+ 1 - 2
drivers/net/pppoe.c

@@ -89,7 +89,6 @@
 #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
 #define PPPOE_HASH_MASK	(PPPOE_HASH_SIZE - 1)

-static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);

 static const struct proto_ops pppoe_ops;
@@ -949,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)

 abort:
 	kfree_skb(skb);
-	return 1;
+	return 0;
 }

 /************************************************************************

+ 5 - 5
drivers/net/ps3_gelic_wireless.h

@@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg {
 	u16 bss_type;    /* infra or adhoc */
 	u16 auth_method; /* shared key or open */
 	u16 op_mode; /* B/G */
-} __attribute__((packed));
+} __packed;


 /* for GELIC_EURUS_CMD_WEP_CFG */
@@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg {
 	/* all fields are big endian */
 	u16 security;
 	u8 key[4][16];
-} __attribute__((packed));
+} __packed;

 /* for GELIC_EURUS_CMD_WPA_CFG */
 enum gelic_eurus_wpa_security {
@@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg {
 	u16 security;
 	u16 psk_type; /* psk key encoding type */
 	u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
-} __attribute__((packed));
+} __packed;

 /* for GELIC_EURUS_CMD_{START,GET}_SCAN */
 enum gelic_eurus_scan_capability {
@@ -171,7 +171,7 @@ struct gelic_eurus_scan_info {
 	__be32 reserved3;
 	__be32 reserved4;
 	u8 elements[0]; /* ie */
-} __attribute__ ((packed));
+} __packed;

 /* the hypervisor returns bbs up to 16 */
 #define GELIC_EURUS_MAX_SCAN  (16)
@@ -193,7 +193,7 @@ struct gelic_wl_scan_info {
 struct gelic_eurus_rssi_info {
 	/* big endian */
 	__be16 rssi;
-} __attribute__ ((packed));
+} __packed;


 /* for 'stat' member of gelic_wl_info */

+ 110 - 10
drivers/net/qlcnic/qlcnic.h

@@ -51,8 +51,8 @@

 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 2
-#define QLCNIC_LINUX_VERSIONID  "5.0.2"
+#define _QLCNIC_LINUX_SUBVERSION 3
+#define QLCNIC_LINUX_VERSIONID  "5.0.3"
 #define QLCNIC_DRV_IDC_VER  0x01

 #define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
@@ -197,8 +197,7 @@ struct cmd_desc_type0 {

 	__le64 addr_buffer4;

-	__le32 reserved2;
-	__le16 reserved;
+	u8 eth_addr[ETH_ALEN];
 	__le16 vlan_TCI;

 } __attribute__ ((aligned(64)));
@@ -315,6 +314,8 @@ struct uni_data_desc{
 #define QLCNIC_BRDTYPE_P3_10G_XFP	0x0032
 #define QLCNIC_BRDTYPE_P3_10G_TP	0x0080

+#define QLCNIC_MSIX_TABLE_OFFSET	0x44
+
 /* Flash memory map */
 #define QLCNIC_BRDCFG_START	0x4000		/* board config */
 #define QLCNIC_BOOTLD_START	0x10000		/* bootld */
@@ -542,7 +543,17 @@ struct qlcnic_recv_context {
 #define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS	0x0000001c
 #define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES	0x0000001d
 #define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD	0x0000001e
-#define QLCNIC_CDRP_CMD_MAX			0x0000001f
+#define QLCNIC_CDRP_CMD_MAC_ADDRESS		0x0000001f
+
+#define QLCNIC_CDRP_CMD_GET_PCI_INFO		0x00000020
+#define QLCNIC_CDRP_CMD_GET_NIC_INFO		0x00000021
+#define QLCNIC_CDRP_CMD_SET_NIC_INFO		0x00000022
+#define QLCNIC_CDRP_CMD_RESET_NPAR		0x00000023
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY	0x00000024
+#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH		0x00000025
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS	0x00000026
+#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING	0x00000027
+#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH	0x00000028

 #define QLCNIC_RCODE_SUCCESS		0
 #define QLCNIC_RCODE_TIMEOUT		17
@@ -560,7 +571,6 @@ struct qlcnic_recv_context {
 /*
  * Context state
 */
-#define QLCHAL_VERSION	1

 #define QLCNIC_HOST_CTX_STATE_ACTIVE	2

@@ -881,12 +891,14 @@ struct qlcnic_mac_req {
 #define QLCNIC_LRO_ENABLED		0x08
 #define QLCNIC_BRIDGE_ENABLED       	0X10
 #define QLCNIC_DIAG_ENABLED		0x20
+#define QLCNIC_NPAR_ENABLED		0x40
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))

 #define MSIX_ENTRIES_PER_ADAPTER	NUM_STS_DESC_RINGS
 #define QLCNIC_MSIX_TBL_SPACE		8192
 #define QLCNIC_PCI_REG_MSIX_TBL 	0x44
+#define QLCNIC_MSIX_TBL_PGSIZE		4096

 #define QLCNIC_NETDEV_WEIGHT	128
 #define QLCNIC_ADAPTER_UP_MAGIC 777
@@ -923,7 +935,6 @@ struct qlcnic_adapter {
 	u8 mc_enabled;
 	u8 max_mc_count;
 	u8 rss_supported;
-	u8 rsrvd1;
 	u8 fw_wait_cnt;
 	u8 fw_fail_cnt;
 	u8 tx_timeo_cnt;
@@ -940,6 +951,15 @@ struct qlcnic_adapter {
 	u16 link_autoneg;
 	u16 module_type;

+	u16 op_mode;
+	u16 switch_mode;
+	u16 max_tx_ques;
+	u16 max_rx_ques;
+	u16 min_tx_bw;
+	u16 max_tx_bw;
+	u16 max_mtu;
+
+	u32 fw_hal_version;
 	u32 capabilities;
 	u32 flags;
 	u32 irq;
@@ -948,18 +968,22 @@ struct qlcnic_adapter {
 	u32 int_vec_bit;
 	u32 heartbit;

+	u8 max_mac_filters;
 	u8 dev_state;
 	u8 diag_test;
 	u8 diag_cnt;
 	u8 reset_ack_timeo;
 	u8 dev_init_timeo;
-	u8 rsrd1;
 	u16 msg_enable;

 	u8 mac_addr[ETH_ALEN];

 	u64 dev_rst_time;

+	struct qlcnic_pci_info *npars;
+	struct qlcnic_eswitch *eswitch;
+	struct qlcnic_nic_template *nic_ops;
+
 	struct qlcnic_adapter_stats stats;

 	struct qlcnic_recv_context recv_ctx;
@@ -984,6 +1008,53 @@ struct qlcnic_adapter {
 	const struct firmware *fw;
 };

+struct qlcnic_info {
+	__le16	pci_func;
+	__le16	op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+	__le16	phys_port;
+	__le16	switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+	__le32	capabilities;
+	u8	max_mac_filters;
+	u8	reserved1;
+	__le16	max_mtu;
+
+	__le16	max_tx_ques;
+	__le16	max_rx_ques;
+	__le16	min_tx_bw;
+	__le16	max_tx_bw;
+	u8	reserved2[104];
+};
+
+struct qlcnic_pci_info {
+	__le16	id; /* pci function id */
+	__le16	active; /* 1 = Enabled */
+	__le16	type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+	__le16	default_port; /* default port number */
+
+	__le16	tx_min_bw; /* Multiple of 100mbpc */
+	__le16	tx_max_bw;
+	__le16	reserved1[2];
+
+	u8	mac[ETH_ALEN];
+	u8	reserved2[106];
+};
+
+struct qlcnic_eswitch {
+	u8	port;
+	u8	active_vports;
+	u8	active_vlans;
+	u8	active_ucast_filters;
+	u8	max_ucast_filters;
+	u8	max_active_vlans;
+
+	u32	flags;
+#define QLCNIC_SWITCH_ENABLE		BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING	BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE	BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING	BIT_4
+};
+
 int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
 int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);

@@ -1070,13 +1141,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
 void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
 		struct qlcnic_host_tx_ring *tx_ring);
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
 void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
 int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);

 /* Functions from qlcnic_main.c */
 int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1088,6 +1160,25 @@ int qlcnic_check_loopback_buff(unsigned char *data);
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);

+/* Management functions */
+int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_get_nic_info(struct qlcnic_adapter *, u8);
+int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_get_pci_info(struct qlcnic_adapter *);
+int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
+
+/*  eSwitch management functions */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
+				struct qlcnic_eswitch *);
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
+				struct qlcnic_eswitch *);
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
+			u8, u8, u16);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+extern int qlcnic_config_tso;
+
 /*
  * QLOGIC Board information
  */
@@ -1131,6 +1222,15 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)

 extern const struct ethtool_ops qlcnic_ethtool_ops;

+struct qlcnic_nic_template {
+	int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
+	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+	int (*config_led) (struct qlcnic_adapter *, u32, u32);
+	int (*set_ilb_mode) (struct qlcnic_adapter *);
+	void (*clear_ilb_mode) (struct qlcnic_adapter *);
+	int (*start_firmware) (struct qlcnic_adapter *);
+};
+
 #define QLCDB(adapter, lvl, _fmt, _args...) do {	\
 #define QLCDB(adapter, lvl, _fmt, _args...) do {	\
 	if (NETIF_MSG_##lvl & adapter->msg_enable)	\
 	if (NETIF_MSG_##lvl & adapter->msg_enable)	\
 		printk(KERN_INFO "%s: %s: " _fmt,	\
 		printk(KERN_INFO "%s: %s: " _fmt,	\

+ 494 - 19
drivers/net/qlcnic/qlcnic_ctx.c

@@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
 
 
 	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
 	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
 		if (qlcnic_issue_cmd(adapter,
 		if (qlcnic_issue_cmd(adapter,
-				adapter->ahw.pci_func,
-				QLCHAL_VERSION,
-				recv_ctx->context_id,
-				mtu,
-				0,
-				QLCNIC_CDRP_CMD_SET_MTU)) {
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			recv_ctx->context_id,
+			mtu,
+			0,
+			QLCNIC_CDRP_CMD_SET_MTU)) {
 
 
 			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
 			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
 			return -EIO;
 			return -EIO;
@@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 
 
 	int i, nrds_rings, nsds_rings;
 	int i, nrds_rings, nsds_rings;
 	size_t rq_size, rsp_size;
 	size_t rq_size, rsp_size;
-	u32 cap, reg, val;
+	u32 cap, reg, val, reg2;
 	int err;
 	int err;
 
 
 	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -197,7 +197,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 	phys_addr = hostrq_phys_addr;
 	phys_addr = hostrq_phys_addr;
 	err = qlcnic_issue_cmd(adapter,
 	err = qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			(u32)(phys_addr >> 32),
 			(u32)(phys_addr >> 32),
 			(u32)(phys_addr & 0xffffffff),
 			(u32)(phys_addr & 0xffffffff),
 			rq_size,
 			rq_size,
@@ -216,8 +216,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 		rds_ring = &recv_ctx->rds_rings[i];
 		rds_ring = &recv_ctx->rds_rings[i];
 
 
 		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
 		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
-		rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+		if (adapter->fw_hal_version == QLCNIC_FW_BASE)
+			rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
 				QLCNIC_REG(reg - 0x200));
 				QLCNIC_REG(reg - 0x200));
+		else
+			rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 +
+				reg;
 	}
 	}
 
 
 	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
 	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -227,12 +231,18 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 		sds_ring = &recv_ctx->sds_rings[i];
 		sds_ring = &recv_ctx->sds_rings[i];
 
 
 		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
 		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
-		sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
-				QLCNIC_REG(reg - 0x200));
+		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
 
 
-		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
-		sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+		if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
+			sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
 				QLCNIC_REG(reg - 0x200));
 				QLCNIC_REG(reg - 0x200));
+			sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+				QLCNIC_REG(reg2 - 0x200));
+		} else {
+			sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 +
+				reg;
+			sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
+		}
 	}
 	}
 
 
 	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
 	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -253,7 +263,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
 
 
 	if (qlcnic_issue_cmd(adapter,
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			recv_ctx->context_id,
 			recv_ctx->context_id,
 			QLCNIC_DESTROY_CTX_RESET,
 			QLCNIC_DESTROY_CTX_RESET,
 			0,
 			0,
@@ -319,7 +329,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
 	phys_addr = rq_phys_addr;
 	phys_addr = rq_phys_addr;
 	err = qlcnic_issue_cmd(adapter,
 	err = qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			(u32)(phys_addr >> 32),
 			(u32)(phys_addr >> 32),
 			((u32)phys_addr & 0xffffffff),
 			((u32)phys_addr & 0xffffffff),
 			rq_size,
 			rq_size,
@@ -327,8 +337,12 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
 
 
 	if (err == QLCNIC_RCODE_SUCCESS) {
 	if (err == QLCNIC_RCODE_SUCCESS) {
 		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
 		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
-		tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+		if (adapter->fw_hal_version == QLCNIC_FW_BASE)
+			tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
 				QLCNIC_REG(temp - 0x200));
 				QLCNIC_REG(temp - 0x200));
+		else
+			tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 +
+				temp;
 
 
 		adapter->tx_context_id =
 		adapter->tx_context_id =
 			le16_to_cpu(prsp->context_id);
 			le16_to_cpu(prsp->context_id);
@@ -351,7 +365,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
 {
 {
 	if (qlcnic_issue_cmd(adapter,
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			adapter->tx_context_id,
 			adapter->tx_context_id,
 			QLCNIC_DESTROY_CTX_RESET,
 			QLCNIC_DESTROY_CTX_RESET,
 			0,
 			0,
@@ -368,7 +382,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
 
 
 	if (qlcnic_issue_cmd(adapter,
 	if (qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			reg,
 			reg,
 			0,
 			0,
 			0,
 			0,
@@ -385,7 +399,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
 {
 {
 	return qlcnic_issue_cmd(adapter,
 	return qlcnic_issue_cmd(adapter,
 			adapter->ahw.pci_func,
 			adapter->ahw.pci_func,
-			QLCHAL_VERSION,
+			adapter->fw_hal_version,
 			reg,
 			reg,
 			val,
 			val,
 			0,
 			0,
@@ -533,3 +547,464 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
 	}
 	}
 }
 }
 
 
+/* Set MAC address of a NIC partition */
+int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
+{
+	int err = 0;
+	u32 arg1, arg2, arg3;
+
+	arg1 = adapter->ahw.pci_func | BIT_9;
+	arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	arg3 = mac[4] | (mac[5] << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			arg3,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set mac address%d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get MAC address of a NIC partition */
+int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+	int err;
+	u32 arg1;
+
+	arg1 = adapter->ahw.pci_func | BIT_8;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
+				QLCNIC_ARG2_CRB_OFFSET, 0, mac);
+		dev_info(&adapter->pdev->dev, "MAC address: %pM\n", mac);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get mac address%d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
+{
+	int	err;
+	dma_addr_t nic_dma_t;
+	struct qlcnic_info *nic_info;
+	void *nic_info_addr;
+	size_t	nic_size = sizeof(struct qlcnic_info);
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev,
+		nic_size, &nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+	memset(nic_info_addr, 0, nic_size);
+
+	nic_info = (struct qlcnic_info *) nic_info_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			(func_id << 16 | nic_size),
+			QLCNIC_CDRP_CMD_GET_NIC_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		adapter->physical_port = le16_to_cpu(nic_info->phys_port);
+		adapter->switch_mode = le16_to_cpu(nic_info->switch_mode);
+		adapter->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+		adapter->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+		adapter->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+		adapter->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+		adapter->max_mtu = le16_to_cpu(nic_info->max_mtu);
+		adapter->capabilities = le32_to_cpu(nic_info->capabilities);
+		adapter->max_mac_filters = nic_info->max_mac_filters;
+
+		dev_info(&adapter->pdev->dev,
+			"phy port: %d switch_mode: %d,\n"
+			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
+			"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
+			adapter->physical_port, adapter->switch_mode,
+			adapter->max_tx_ques, adapter->max_rx_ques,
+			adapter->min_tx_bw, adapter->max_tx_bw,
+			adapter->max_mtu, adapter->capabilities);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get nic info%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+{
+	int err = -EIO;
+	u32 func_state;
+	dma_addr_t nic_dma_t;
+	void *nic_info_addr;
+	struct qlcnic_info *nic_info;
+	size_t nic_size = sizeof(struct qlcnic_info);
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	if (qlcnic_api_lock(adapter))
+		return err;
+
+	func_state = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+	if (QLC_DEV_CHECK_ACTIVE(func_state, nic->pci_func)) {
+		qlcnic_api_unlock(adapter);
+		return err;
+	}
+
+	qlcnic_api_unlock(adapter);
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
+			&nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+
+	memset(nic_info_addr, 0, nic_size);
+	nic_info = (struct qlcnic_info *)nic_info_addr;
+
+	nic_info->pci_func = cpu_to_le16(nic->pci_func);
+	nic_info->op_mode = cpu_to_le16(nic->op_mode);
+	nic_info->phys_port = cpu_to_le16(nic->phys_port);
+	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+	nic_info->capabilities = cpu_to_le32(nic->capabilities);
+	nic_info->max_mac_filters = nic->max_mac_filters;
+	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			nic_size,
+			QLCNIC_CDRP_CMD_SET_NIC_INFO);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set nic info%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
+{
+	int err = 0, i;
+	dma_addr_t pci_info_dma_t;
+	struct qlcnic_pci_info *npar;
+	void *pci_info_addr;
+	size_t npar_size = sizeof(struct qlcnic_pci_info);
+	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+
+	pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
+			&pci_info_dma_t);
+	if (!pci_info_addr)
+		return -ENOMEM;
+	memset(pci_info_addr, 0, pci_size);
+
+	if (!adapter->npars)
+		adapter->npars = kzalloc(pci_size, GFP_KERNEL);
+	if (!adapter->npars) {
+		err = -ENOMEM;
+		goto err_npar;
+	}
+
+	if (!adapter->eswitch)
+		adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+	if (!adapter->eswitch) {
+		err = -ENOMEM;
+		goto err_eswitch;
+	}
+
+	npar = (struct qlcnic_pci_info *) pci_info_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(pci_info_dma_t),
+			LSD(pci_info_dma_t),
+			pci_size,
+			QLCNIC_CDRP_CMD_GET_PCI_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++) {
+			adapter->npars[i].id = le32_to_cpu(npar->id);
+			adapter->npars[i].active = le32_to_cpu(npar->active);
+			adapter->npars[i].type = le32_to_cpu(npar->type);
+			adapter->npars[i].default_port =
+				le32_to_cpu(npar->default_port);
+			adapter->npars[i].tx_min_bw =
+				le32_to_cpu(npar->tx_min_bw);
+			adapter->npars[i].tx_max_bw =
+				le32_to_cpu(npar->tx_max_bw);
+			memcpy(adapter->npars[i].mac, npar->mac, ETH_ALEN);
+		}
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get PCI Info %d\n", err);
+		kfree(adapter->npars);
+		err = -EIO;
+	}
+	goto err_npar;
+
+err_eswitch:
+	kfree(adapter->npars);
+	adapter->npars = NULL;
+
+err_npar:
+	pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
+		pci_info_dma_t);
+	return err;
+}
+
+/* Reset a NIC partition */
+
+int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
+{
+	int err = -EIO;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			func_no,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_RESET_NPAR);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to issue reset partition %d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get eSwitch Capabilities */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
+					struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->max_ucast_filters = MSB(arg2);
+		eswitch->max_active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_7)
+			eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch capabilities %d\n", err);
+	}
+
+	return err;
+}
+
+/* Get current status of eswitch */
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
+				struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->active_ucast_filters = MSB(arg2);
+		eswitch->active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch status %d\n", err);
+	}
+
+	return err;
+}
+
+/* Enable/Disable eSwitch */
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!eswitch)
+		return err;
+
+	arg1 = eswitch->port | (enable ? BIT_4 : 0);
+	arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
+		(eswitch->max_active_vlans << 16);
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			0,
+			QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to enable eswitch %d\n", eswitch->port);
+		eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+		err = -EIO;
+	} else {
+		eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+		dev_info(&adapter->pdev->dev,
+			"Enabled eSwitch for port %d\n", eswitch->port);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+				u8 enable_mirroring, u8 pci_func)
+{
+	int err = -EIO;
+	u32 arg1;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = id | (enable_mirroring ? BIT_4 : 0);
+	arg1 |= pci_func << 8;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure port mirroring %d on eswitch:%d\n",
+			pci_func, id);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch %d for port mirroring:%d\n",
+			id, pci_func);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch port */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
+		int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
+		u8 mac_learn, u8 pci_func, u16 vlan_id)
+{
+	int err = -EIO;
+	u32 arg1;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
+	arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
+	arg1 |= pci_func << 8;
+	if (vlan_tagging)
+		arg1 |= BIT_5 | (vlan_id << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure eswitch port %d\n", eswitch->port);
+		eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+	} else {
+		eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch for port %d\n", eswitch->port);
+	}
+
+	return err;
+}
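
For orientation (not part of the patch): a minimal sketch of how a management function (op_mode == QLCNIC_MGMT_FUNC) might chain the partition/eSwitch entry points added above. It mirrors qlcnic_set_mgmt_driver() in the qlcnic_main.c hunk further down; locking and cleanup are elided, QLCNIC_NIU_MAX_XG_PORTS is taken from the driver headers, and the function name is purely illustrative.

static int example_enable_eswitches(struct qlcnic_adapter *adapter)
{
	u8 i, enabled = 0;

	/* fills adapter->npars[] and allocates adapter->eswitch[] */
	if (qlcnic_get_pci_info(adapter))
		return -EIO;

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
		/* 0 (QLCNIC_RCODE_SUCCESS) means the port reported an eSwitch */
		if (!qlcnic_get_eswitch_capabilities(adapter, i,
						     &adapter->eswitch[i])) {
			qlcnic_toggle_eswitch(adapter, i, 1);
			enabled++;
		}
	}

	return enabled ? 0 : -EIO;
}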

+ 6 - 5
drivers/net/qlcnic/qlcnic_ethtool.c

@@ -683,13 +683,13 @@ static int qlcnic_loopback_test(struct net_device *netdev)
 	if (ret)
 		goto clear_it;
 
-	ret = qlcnic_set_ilb_mode(adapter);
+	ret = adapter->nic_ops->set_ilb_mode(adapter);
 	if (ret)
 		goto done;
 
 	ret = qlcnic_do_ilb_test(adapter);
 
-	qlcnic_clear_ilb_mode(adapter);
+	adapter->nic_ops->clear_ilb_mode(adapter);
 
 done:
 	qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -715,7 +715,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
 
 	adapter->diag_cnt = 0;
 	ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
-			QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+			adapter->fw_hal_version, adapter->portnum,
+			0, 0, 0x00000011);
 	if (ret)
 		goto done;
 
@@ -834,7 +835,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int ret;
 
-	ret = qlcnic_config_led(adapter, 1, 0xf);
+	ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
 	if (ret) {
 		dev_err(&adapter->pdev->dev,
 			"Failed to set LED blink state.\n");
@@ -843,7 +844,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
 
 	msleep_interruptible(val * 1000);
 
-	ret = qlcnic_config_led(adapter, 0, 0xf);
+	ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
 	if (ret) {
 		dev_err(&adapter->pdev->dev,
 			"Failed to reset LED blink state.\n");

+ 79 - 5
drivers/net/qlcnic/qlcnic_hdr.h

@@ -208,6 +208,39 @@ enum {
 	QLCNIC_HW_PX_MAP_CRB_PGR0
 };
 
+#define	BIT_0	0x1
+#define	BIT_1	0x2
+#define	BIT_2	0x4
+#define	BIT_3	0x8
+#define	BIT_4	0x10
+#define	BIT_5	0x20
+#define	BIT_6	0x40
+#define	BIT_7	0x80
+#define	BIT_8	0x100
+#define	BIT_9	0x200
+#define	BIT_10	0x400
+#define	BIT_11	0x800
+#define	BIT_12	0x1000
+#define	BIT_13	0x2000
+#define	BIT_14	0x4000
+#define	BIT_15	0x8000
+#define	BIT_16	0x10000
+#define	BIT_17	0x20000
+#define	BIT_18	0x40000
+#define	BIT_19	0x80000
+#define	BIT_20	0x100000
+#define	BIT_21	0x200000
+#define	BIT_22	0x400000
+#define	BIT_23	0x800000
+#define	BIT_24	0x1000000
+#define	BIT_25	0x2000000
+#define	BIT_26	0x4000000
+#define	BIT_27	0x8000000
+#define	BIT_28	0x10000000
+#define	BIT_29	0x20000000
+#define	BIT_30	0x40000000
+#define	BIT_31	0x80000000
+
 /*  This field defines CRB adr [31:20] of the agents */
 
 #define QLCNIC_HW_CRB_HUB_AGT_ADR_MN	\
@@ -668,10 +701,11 @@ enum {
 #define QLCNIC_CRB_DEV_REF_COUNT	(QLCNIC_CAM_RAM(0x138))
 #define QLCNIC_CRB_DEV_STATE		(QLCNIC_CAM_RAM(0x140))
 
-#define QLCNIC_CRB_DRV_STATE               (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH             (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO      (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_STATE		(QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH		(QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO	(QLCNIC_CAM_RAM(0x14c))
 #define QLCNIC_CRB_DRV_IDC_VER		(QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_CRB_DEV_NPAR_STATE	(QLCNIC_CAM_RAM(0x19c))
 #define QLCNIC_ROM_DEV_INIT_TIMEOUT	(0x3e885c)
 #define QLCNIC_ROM_DRV_RESET_TIMEOUT	(0x3e8860)
 
@@ -684,15 +718,26 @@ enum {
 #define QLCNIC_DEV_FAILED		0x6
 #define QLCNIC_DEV_QUISCENT		0x7
 
+#define QLCNIC_DEV_NPAR_NOT_RDY	0
+#define QLCNIC_DEV_NPAR_RDY		1
+
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN)		((VAL) &= (1 << (FN * 4)))
 #define QLC_DEV_SET_REF_CNT(VAL, FN)		((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_CLR_REF_CNT(VAL, FN)		((VAL) &= ~(1 << (FN * 4)))
 #define QLC_DEV_SET_RST_RDY(VAL, FN)		((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_SET_QSCNT_RDY(VAL, FN)		((VAL) |= (2 << (FN * 4)))
 #define QLC_DEV_CLR_RST_QSCNT(VAL, FN)		((VAL) &= ~(3 << (FN * 4)))
 
+#define QLC_DEV_GET_DRV(VAL, FN)		(0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN)		((VAL) << (FN * 4))
+
+#define QLCNIC_TYPE_NIC		1
+#define QLCNIC_TYPE_FCOE		2
+#define QLCNIC_TYPE_ISCSI		3
+
 #define QLCNIC_RCODE_DRIVER_INFO		0x20000000
-#define QLCNIC_RCODE_DRIVER_CAN_RELOAD		0x40000000
-#define QLCNIC_RCODE_FATAL_ERROR		0x80000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD		BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR		BIT_31
 #define QLCNIC_FWERROR_PEGNUM(code)		((code) & 0xff)
 #define QLCNIC_FWERROR_CODE(code)		((code >> 8) & 0xfffff)
 
@@ -721,6 +766,35 @@ struct qlcnic_legacy_intr_set {
 	u32	pci_int_reg;
 };
 
+#define QLCNIC_FW_API		0x1b216c
+#define QLCNIC_DRV_OP_MODE	0x1b2170
+#define QLCNIC_MSIX_BASE	0x132110
+#define QLCNIC_MAX_PCI_FUNC	8
+
+/* PCI function operational mode */
+enum {
+	QLCNIC_MGMT_FUNC	= 0,
+	QLCNIC_PRIV_FUNC	= 1,
+	QLCNIC_NON_PRIV_FUNC	= 2
+};
+
+/* FW HAL api version */
+enum {
+	QLCNIC_FW_BASE	= 1,
+	QLCNIC_FW_NPAR	= 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x)	((uint8_t)(x))
+#define MSB(x)	((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x)  ((uint16_t)((uint32_t)(x)))
+#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x)  ((uint32_t)((uint64_t)(x)))
+#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
 #define	QLCNIC_LEGACY_INTR_CONFIG					\
 {									\
 	{								\
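
Aside (not part of the patch): the new LSD()/MSD() helpers above are what split a 64-bit DMA handle into the two 32-bit mailbox arguments passed to qlcnic_issue_cmd() earlier in this commit. A self-contained sketch of the arithmetic, using a made-up bus address:

#include <stdint.h>
#include <stdio.h>

#define LSD(x)  ((uint32_t)((uint64_t)(x)))
#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
	uint64_t dma = 0x0000123456789abcULL;	/* example bus address */

	/* prints "hi=0x1234 lo=0x56789abc" */
	printf("hi=0x%x lo=0x%x\n", (unsigned)MSD(dma), (unsigned)LSD(dma));
	return 0;
}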

+ 4 - 10
drivers/net/qlcnic/qlcnic_hw.c

@@ -538,7 +538,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
 	return rv;
 }
 
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
 	struct qlcnic_nic_req req;
 	u64 word;
@@ -704,21 +704,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
 	return rc;
 }
 
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
 {
-	u32 crbaddr, mac_hi, mac_lo;
+	u32 crbaddr;
 	int pci_func = adapter->ahw.pci_func;
 
 	crbaddr = CRB_MAC_BLOCK_START +
 		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
 
-	mac_lo = QLCRD32(adapter, crbaddr);
-	mac_hi = QLCRD32(adapter, crbaddr+4);
-
-	if (pci_func & 1)
-		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
-	else
-		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+	qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
 
 	return 0;
 }

+ 30 - 10
drivers/net/qlcnic/qlcnic_init.c

@@ -520,17 +520,16 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
 	int timeo;
 	u32 val;
 
-	val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
-	val = (val >> (adapter->portnum * 4)) & 0xf;
-
-	if ((val & 0x3) != 1) {
-		dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
-									val);
-		return -EIO;
+	if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
+		val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+		val = QLC_DEV_GET_DRV(val, adapter->portnum);
+		if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+			dev_err(&adapter->pdev->dev,
+				"Not an Ethernet NIC func=%u\n", val);
+			return -EIO;
+		}
+		adapter->physical_port = (val >> 2);
 	}
-
-	adapter->physical_port = (val >> 2);
-
 	if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
 		timeo = 30;
 
@@ -1701,3 +1700,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
 	sds_ring->consumer = consumer;
 	writel(consumer, sds_ring->crb_sts_consumer);
 }
+
+void
+qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
+			u8 alt_mac, u8 *mac)
+{
+	u32 mac_low, mac_high;
+	int i;
+
+	mac_low = QLCRD32(adapter, off1);
+	mac_high = QLCRD32(adapter, off2);
+
+	if (alt_mac) {
+		mac_low |= (mac_low >> 16) | (mac_high << 16);
+		mac_high >>= 16;
+	}
+
+	for (i = 0; i < 2; i++)
+		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+	for (i = 2; i < 6; i++)
+		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
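
Aside (not part of the patch): the byte order qlcnic_fetch_mac() produces, shown with hard-coded values standing in for the two QLCRD32() reads (alt_mac == 0 case). qlcnic_read_mac_addr() in qlcnic_main.c below can then memcpy the result straight into netdev->dev_addr, which is why the old per-byte reversal loop goes away.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mac_low = 0x33445566, mac_high = 0x00001122;
	uint8_t mac[6];
	int i;

	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));

	/* prints "11:22:33:44:55:66" - most significant byte first */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}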

+ 284 - 28
drivers/net/qlcnic/qlcnic_main.c

@@ -65,6 +65,10 @@ static int load_fw_file;
 module_param(load_fw_file, int, 0644);
 module_param(load_fw_file, int, 0644);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
 
+static int qlcnic_config_npars;
+module_param(qlcnic_config_npars, int, 0644);
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
+
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent);
 		const struct pci_device_id *ent);
 static void __devexit qlcnic_remove(struct pci_dev *pdev);
 static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -99,7 +103,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data);
 
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
-
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 /*  PCI Device ID Table  */
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
 #define ENTRY(device) \
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -307,19 +318,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
 static int
 static int
 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 {
 {
-	int i;
-	unsigned char *p;
-	u64 mac_addr;
+	u8 mac_addr[ETH_ALEN];
 	struct net_device *netdev = adapter->netdev;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct pci_dev *pdev = adapter->pdev;
 
 
-	if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+	if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
 		return -EIO;
 		return -EIO;
 
 
-	p = (unsigned char *)&mac_addr;
-	for (i = 0; i < 6; i++)
-		netdev->dev_addr[i] = *(p + 5 - i);
-
+	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
 	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
 
 
@@ -371,6 +377,33 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #endif
 #endif
 };
 };
 
 
+static struct qlcnic_nic_template qlcnic_ops = {
+	.get_mac_addr = qlcnic_get_mac_addr,
+	.config_bridged_mode = qlcnic_config_bridged_mode,
+	.config_led = qlcnic_config_led,
+	.set_ilb_mode = qlcnic_set_ilb_mode,
+	.clear_ilb_mode = qlcnic_clear_ilb_mode,
+	.start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_pf_ops = {
+	.get_mac_addr = qlcnic_get_mac_address,
+	.config_bridged_mode = qlcnic_config_bridged_mode,
+	.config_led = qlcnic_config_led,
+	.set_ilb_mode = qlcnic_set_ilb_mode,
+	.clear_ilb_mode = qlcnic_clear_ilb_mode,
+	.start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_vf_ops = {
+	.get_mac_addr = qlcnic_get_mac_address,
+	.config_bridged_mode = qlcnicvf_config_bridged_mode,
+	.config_led = qlcnicvf_config_led,
+	.set_ilb_mode = qlcnicvf_set_ilb_mode,
+	.clear_ilb_mode = qlcnicvf_clear_ilb_mode,
+	.start_firmware = qlcnicvf_start_firmware
+};
+
 static void
 static void
 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
 {
 {
@@ -452,6 +485,132 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
 		iounmap(adapter->ahw.pci_base0);
 		iounmap(adapter->ahw.pci_base0);
 }
 }
 
 
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+	u8 id;
+	u32 ref_count;
+	int i, ret = 1;
+	u32 data = QLCNIC_MGMT_FUNC;
+	void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+
+	/* If other drivers are not in use set their privilege level */
+	ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+	ret = qlcnic_api_lock(adapter);
+	if (ret)
+		goto err_lock;
+	if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
+		goto err_npar;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		id = adapter->npars[i].id;
+		if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
+			id == adapter->ahw.pci_func)
+			continue;
+		data |= (qlcnic_config_npars & QLC_DEV_SET_DRV(0xf, id));
+	}
+	writel(data, priv_op);
+
+err_npar:
+	qlcnic_api_unlock(adapter);
+err_lock:
+	return ret;
+}
+
+static u8
+qlcnic_set_mgmt_driver(struct qlcnic_adapter *adapter)
+{
+	u8 i, ret = 0;
+
+	if (qlcnic_get_pci_info(adapter))
+		return ret;
+	/* Set the eswitch */
+	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
+		if (!qlcnic_get_eswitch_capabilities(adapter, i,
+			&adapter->eswitch[i])) {
+			ret++;
+			qlcnic_toggle_eswitch(adapter, i, ret);
+		}
+	}
+	return ret;
+}
+
+static u32
+qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
+{
+	void __iomem *msix_base_addr;
+	void __iomem *priv_op;
+	u32 func;
+	u32 msix_base;
+	u32 op_mode, priv_level;
+
+	/* Determine FW API version */
+	adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
+	if (adapter->fw_hal_version == ~0) {
+		adapter->nic_ops = &qlcnic_ops;
+		adapter->fw_hal_version = QLCNIC_FW_BASE;
+		adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn);
+		dev_info(&adapter->pdev->dev,
+			"FW does not support nic partion\n");
+		return adapter->fw_hal_version;
+	}
+
+	/* Find PCI function number */
+	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+	msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
+	msix_base = readl(msix_base_addr);
+	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
+	adapter->ahw.pci_func = func;
+
+	/* Determine function privilege level */
+	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+	op_mode = readl(priv_op);
+	if (op_mode == QLC_DEV_DRV_DEFAULT) {
+		priv_level = QLCNIC_MGMT_FUNC;
+		if (qlcnic_api_lock(adapter))
+			return 0;
+		op_mode = (op_mode & ~QLC_DEV_SET_DRV(0xf, func)) |
+				(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, func));
+		writel(op_mode, priv_op);
+		qlcnic_api_unlock(adapter);
+
+	} else
+		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+	switch (priv_level) {
+	case QLCNIC_MGMT_FUNC:
+		adapter->op_mode = QLCNIC_MGMT_FUNC;
+		adapter->nic_ops = &qlcnic_pf_ops;
+		/* Set privilege level for other functions */
+		if (qlcnic_config_npars)
+			qlcnic_set_function_modes(adapter);
+		qlcnic_dev_set_npar_ready(adapter);
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d, Management function\n",
+			adapter->fw_hal_version);
+		break;
+	case QLCNIC_PRIV_FUNC:
+		adapter->op_mode = QLCNIC_PRIV_FUNC;
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d, Privileged function\n",
+			adapter->fw_hal_version);
+		adapter->nic_ops = &qlcnic_pf_ops;
+		break;
+	case QLCNIC_NON_PRIV_FUNC:
+		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+		dev_info(&adapter->pdev->dev,
+			"HAL Version: %d Non Privileged function\n",
+			adapter->fw_hal_version);
+		adapter->nic_ops = &qlcnic_vf_ops;
+		break;
+	default:
+		dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
+			priv_level);
+		return 0;
+	}
+	return adapter->fw_hal_version;
+}
+
 static int
 static int
 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
 {
 {
@@ -460,7 +619,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
 	unsigned long mem_len, pci_len0 = 0;
 	unsigned long mem_len, pci_len0 = 0;
 
 
 	struct pci_dev *pdev = adapter->pdev;
 	struct pci_dev *pdev = adapter->pdev;
-	int pci_func = adapter->ahw.pci_func;
 
 
 	/* remap phys address */
 	/* remap phys address */
 	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
 	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
@@ -483,8 +641,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
 	adapter->ahw.pci_base0 = mem_ptr0;
 	adapter->ahw.pci_base0 = mem_ptr0;
 	adapter->ahw.pci_len0 = pci_len0;
 	adapter->ahw.pci_len0 = pci_len0;
 
 
+	if (!qlcnic_get_driver_mode(adapter)) {
+		iounmap(adapter->ahw.pci_base0);
+		return -EIO;
+	}
+
 	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
 	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
-		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
 
 
 	return 0;
 	return 0;
 }
 }
@@ -553,7 +716,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 			fw_major, fw_minor, fw_build);
 			fw_major, fw_minor, fw_build);
 
 
-	adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+	if (adapter->fw_hal_version == QLCNIC_FW_NPAR)
+		qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
+	else
+		adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
 
 
 	adapter->flags &= ~QLCNIC_LRO_ENABLED;
 	adapter->flags &= ~QLCNIC_LRO_ENABLED;
 
 
@@ -631,8 +797,14 @@ wait_init:
 	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
 	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
 	qlcnic_idc_debug_info(adapter, 1);
 	qlcnic_idc_debug_info(adapter, 1);
 
 
+	qlcnic_dev_set_npar_ready(adapter);
+
 	qlcnic_check_options(adapter);
 	qlcnic_check_options(adapter);
 
 
+	if (adapter->fw_hal_version != QLCNIC_FW_BASE &&
+			adapter->op_mode == QLCNIC_MGMT_FUNC)
+		qlcnic_set_mgmt_driver(adapter);
+
 	adapter->need_fw_reset = 0;
 	adapter->need_fw_reset = 0;
 
 
 	qlcnic_release_firmware(adapter);
 	qlcnic_release_firmware(adapter);
@@ -977,12 +1149,11 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
 
 
 	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
 	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
 
 
-	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
-	netdev->features |= (NETIF_F_GRO);
-	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6);
 
 
-	netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
-	netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6);
 
 
 	if (pci_using_dac) {
 	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -1036,7 +1207,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev = NULL;
 	struct net_device *netdev = NULL;
 	struct qlcnic_adapter *adapter = NULL;
 	struct qlcnic_adapter *adapter = NULL;
 	int err;
 	int err;
-	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
 	uint8_t revision_id;
 	uint8_t pci_using_dac;
 	uint8_t pci_using_dac;
 
 
@@ -1072,7 +1242,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->netdev  = netdev;
 	adapter->netdev  = netdev;
 	adapter->pdev    = pdev;
 	adapter->pdev    = pdev;
 	adapter->dev_rst_time = jiffies;
 	adapter->dev_rst_time = jiffies;
-	adapter->ahw.pci_func  = pci_func_id;
 
 
 	revision_id = pdev->revision;
 	revision_id = pdev->revision;
 	adapter->ahw.revision_id = revision_id;
 	adapter->ahw.revision_id = revision_id;
@@ -1088,7 +1257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_free_netdev;
 		goto err_out_free_netdev;
 
 
 	/* This will be reset for mezz cards  */
 	/* This will be reset for mezz cards  */
-	adapter->portnum = pci_func_id;
+	adapter->portnum = adapter->ahw.pci_func;
 
 
 	err = qlcnic_get_board_info(adapter);
 	err = qlcnic_get_board_info(adapter);
 	if (err) {
 	if (err) {
@@ -1102,7 +1271,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (qlcnic_setup_idc_param(adapter))
 	if (qlcnic_setup_idc_param(adapter))
 		goto err_out_iounmap;
 		goto err_out_iounmap;
 
 
-	err = qlcnic_start_firmware(adapter);
+	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
 	if (err) {
 		dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
 		dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
 		goto err_out_decr_ref;
 		goto err_out_decr_ref;
@@ -1175,6 +1344,11 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
 
 
 	qlcnic_detach(adapter);
 	qlcnic_detach(adapter);
 
 
+	if (adapter->npars != NULL)
+		kfree(adapter->npars);
+	if (adapter->eswitch != NULL)
+		kfree(adapter->eswitch);
+
 	qlcnic_clr_all_drv_state(adapter);
 	qlcnic_clr_all_drv_state(adapter);
 
 
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1263,7 +1437,7 @@ qlcnic_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_restore_state(pdev);
 
 
-	err = qlcnic_start_firmware(adapter);
+	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
 	if (err) {
 		dev_err(&pdev->dev, "failed to start firmware\n");
 		dev_err(&pdev->dev, "failed to start firmware\n");
 		return err;
 		return err;
@@ -1340,11 +1514,11 @@ qlcnic_tso_check(struct net_device *netdev,
 	u8 opcode = TX_ETHER_PKT;
 	u8 opcode = TX_ETHER_PKT;
 	__be16 protocol = skb->protocol;
 	__be16 protocol = skb->protocol;
 	u16 flags = 0, vid = 0;
 	u16 flags = 0, vid = 0;
-	u32 producer;
 	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
 	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
 	struct cmd_desc_type0 *hwdesc;
 	struct cmd_desc_type0 *hwdesc;
 	struct vlan_ethhdr *vh;
 	struct vlan_ethhdr *vh;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	u32 producer = tx_ring->producer;
 
 
 	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
 	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
 
 
@@ -1360,6 +1534,11 @@ qlcnic_tso_check(struct net_device *netdev,
 		vlan_oob = 1;
 		vlan_oob = 1;
 	}
 	}
 
 
+	if (*(skb->data) & BIT_0) {
+		flags |= BIT_0;
+		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+	}
+
 	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 			skb_shinfo(skb)->gso_size > 0) {
 			skb_shinfo(skb)->gso_size > 0) {
 
 
@@ -1409,7 +1588,6 @@ qlcnic_tso_check(struct net_device *netdev,
 	/* For LSO, we need to copy the MAC/IP/TCP headers into
 	/* For LSO, we need to copy the MAC/IP/TCP headers into
 	 * the descriptor ring
 	 * the descriptor ring
 	 */
 	 */
-	producer = tx_ring->producer;
 	copied = 0;
 	copied = 0;
 	offset = 2;
 	offset = 2;
 
 
@@ -2109,7 +2287,7 @@ qlcnic_fwinit_work(struct work_struct *work)
 {
 {
 	struct qlcnic_adapter *adapter = container_of(work,
 	struct qlcnic_adapter *adapter = container_of(work,
 			struct qlcnic_adapter, fw_work.work);
 			struct qlcnic_adapter, fw_work.work);
-	u32 dev_state = 0xf;
+	u32 dev_state = 0xf, npar_state;
 
 
 	if (qlcnic_api_lock(adapter))
 	if (qlcnic_api_lock(adapter))
 		goto err_ret;
 		goto err_ret;
@@ -2122,6 +2300,19 @@ qlcnic_fwinit_work(struct work_struct *work)
 		return;
 		return;
 	}
 	}
 
 
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+		if (npar_state == QLCNIC_DEV_NPAR_RDY) {
+			qlcnic_api_unlock(adapter);
+			goto wait_npar;
+		} else {
+			qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+				FW_POLL_DELAY);
+			qlcnic_api_unlock(adapter);
+			return;
+		}
+	}
+
 	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 		dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
 		dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
 					adapter->reset_ack_timeo);
 					adapter->reset_ack_timeo);
@@ -2154,7 +2345,7 @@ skip_ack_check:
 
 
 		qlcnic_api_unlock(adapter);
 		qlcnic_api_unlock(adapter);
 
 
-		if (!qlcnic_start_firmware(adapter)) {
+		if (!adapter->nic_ops->start_firmware(adapter)) {
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			return;
 			return;
 		}
 		}
@@ -2163,6 +2354,7 @@ skip_ack_check:
 
 
 	qlcnic_api_unlock(adapter);
 	qlcnic_api_unlock(adapter);
 
 
+wait_npar:
 	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
 	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
 
 
@@ -2177,7 +2369,7 @@ skip_ack_check:
 		break;
 		break;
 
 
 	default:
 	default:
-		if (!qlcnic_start_firmware(adapter)) {
+		if (!adapter->nic_ops->start_firmware(adapter)) {
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
 			return;
 			return;
 		}
 		}
@@ -2251,6 +2443,30 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
 	qlcnic_api_unlock(adapter);
 	qlcnic_api_unlock(adapter);
 }
 }
 
 
+/* Transit to NPAR READY state from NPAR NOT READY state */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+	u32 state;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC ||
+		adapter->fw_hal_version == QLCNIC_FW_BASE)
+		return;
+
+	if (qlcnic_api_lock(adapter))
+		return;
+
+	state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+
+	if (state != QLCNIC_DEV_NPAR_RDY) {
+		QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+			QLCNIC_DEV_NPAR_RDY);
+		QLCDB(adapter, DRV, "NPAR READY state set\n");
+	}
+
+	qlcnic_api_unlock(adapter);
+}
+
 static void
 static void
 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 		work_func_t func, int delay)
 		work_func_t func, int delay)
@@ -2365,6 +2581,46 @@ reschedule:
 	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
 	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
 }
 }
 
 
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	err = qlcnic_can_start_firmware(adapter);
+	if (err)
+		return err;
+
+	qlcnic_check_options(adapter);
+
+	adapter->need_fw_reset = 0;
+
+	return err;
+}
+
+static int
+qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+	return -EOPNOTSUPP;
+}
+
+static void
+qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+	return;
+}
+
 static ssize_t
 static ssize_t
 qlcnic_store_bridged_mode(struct device *dev,
 qlcnic_store_bridged_mode(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 		struct device_attribute *attr, const char *buf, size_t len)
@@ -2382,7 +2638,7 @@ qlcnic_store_bridged_mode(struct device *dev,
 	if (strict_strtoul(buf, 2, &new))
 	if (strict_strtoul(buf, 2, &new))
 		goto err_out;
 		goto err_out;
 
 
-	if (!qlcnic_config_bridged_mode(adapter, !!new))
+	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
 		ret = len;
 		ret = len;
 
 
 err_out:
 err_out:

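
Aside (not part of the patch): the qlcnic_nic_template introduced above is a plain ops-table dispatch, so privileged and non-privileged functions share one call site and the VF table simply stubs out operations the firmware will not let it perform. A simplified, self-contained illustration (struct and function names here are invented):

#include <errno.h>
#include <stdio.h>

struct nic_ops {
	int (*config_led)(int state, int rate);
};

static int pf_config_led(int state, int rate)
{
	printf("LED %s, rate %#x\n", state ? "on" : "off", rate);
	return 0;
}

static int vf_config_led(int state, int rate)
{
	return -EOPNOTSUPP;	/* non-privileged function: refuse */
}

static const struct nic_ops pf_ops = { .config_led = pf_config_led };
static const struct nic_ops vf_ops = { .config_led = vf_config_led };

int main(void)
{
	const struct nic_ops *ops = &vf_ops;	/* picked by privilege level */

	if (ops->config_led(1, 0xf))
		fprintf(stderr, "blink not supported on this function\n");
	return 0;
}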
+ 12 - 12
drivers/net/qlge/qlge.h

@@ -1062,7 +1062,7 @@ struct tx_buf_desc {
 #define TX_DESC_LEN_MASK	0x000fffff
 #define TX_DESC_C	0x40000000
 #define TX_DESC_E	0x80000000
-} __attribute((packed));
+} __packed;
 
 /*
  * IOCB Definitions...
@@ -1095,7 +1095,7 @@ struct ob_mac_iocb_req {
 	__le16 vlan_tci;
 	__le16 reserved4;
 	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_iocb_rsp {
 	u8 opcode;		/* */
@@ -1112,7 +1112,7 @@ struct ob_mac_iocb_rsp {
 	u32 tid;
 	u32 txq_idx;
 	__le32 reserved[13];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_tso_iocb_req {
 	u8 opcode;
@@ -1140,7 +1140,7 @@ struct ob_mac_tso_iocb_req {
 	__le16 vlan_tci;
 	__le16 mss;
 	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
 
 struct ob_mac_tso_iocb_rsp {
 	u8 opcode;
@@ -1157,7 +1157,7 @@ struct ob_mac_tso_iocb_rsp {
 	u32 tid;
 	u32 txq_idx;
 	__le32 reserved2[13];
-} __attribute((packed));
+} __packed;
 
 struct ib_mac_iocb_rsp {
 	u8 opcode;		/* 0x20 */
@@ -1216,7 +1216,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_HL	0x80
 	__le32 hdr_len;		/* */
 	__le64 hdr_addr;	/* */
-} __attribute((packed));
+} __packed;
 
 struct ib_ae_iocb_rsp {
 	u8 opcode;
@@ -1237,7 +1237,7 @@ struct ib_ae_iocb_rsp {
 #define PCI_ERR_ANON_BUF_RD        0x40
 	u8 q_id;
 	__le32 reserved[15];
-} __attribute((packed));
+} __packed;
 
 /*
  * These three structures are for generic
@@ -1249,7 +1249,7 @@ struct ql_net_rsp_iocb {
 	__le16 length;
 	__le32 tid;
 	__le32 reserved[14];
-} __attribute((packed));
+} __packed;
 
 struct net_req_iocb {
 	u8 opcode;
@@ -1257,7 +1257,7 @@ struct net_req_iocb {
 	__le16 flags1;
 	__le32 tid;
 	__le32 reserved1[30];
-} __attribute((packed));
+} __packed;
 
 /*
  * tx ring initialization control block for chip.
@@ -1283,7 +1283,7 @@ struct wqicb {
 	__le16 rid;
 	__le64 addr;
 	__le64 cnsmr_idx_addr;
-} __attribute((packed));
+} __packed;
 
 /*
  * rx ring initialization control block for chip.
@@ -1317,7 +1317,7 @@ struct cqicb {
 	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len;		/* entry count */
-} __attribute((packed));
+} __packed;
 
 struct ricb {
 	u8 base_cq;
@@ -1335,7 +1335,7 @@ struct ricb {
 	u8 hash_cq_id[1024];
 	__le32 ipv6_hash_key[10];
 	__le32 ipv4_hash_key[4];
-} __attribute((packed));
+} __packed;
 
 /* SOFTWARE/DRIVER DATA STRUCTURES. */
 

+ 146 - 156
drivers/net/r6040.c

@@ -44,12 +44,13 @@
 #include <linux/io.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irq.h>
 #include <linux/uaccess.h>
 #include <linux/uaccess.h>
+#include <linux/phy.h>
 
 
 #include <asm/processor.h>
 #include <asm/processor.h>
 
 
 #define DRV_NAME	"r6040"
 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.25"
-#define DRV_RELDATE	"20Aug2009"
+#define DRV_VERSION	"0.26"
+#define DRV_RELDATE	"30May2010"
 
 
 /* PHY CHIP Address */
 /* PHY CHIP Address */
 #define PHY1_ADDR	1	/* For MAC1 */
 #define PHY1_ADDR	1	/* For MAC1 */
@@ -179,7 +180,6 @@ struct r6040_descriptor {
 
 
 struct r6040_private {
 struct r6040_private {
 	spinlock_t lock;		/* driver lock */
 	spinlock_t lock;		/* driver lock */
-	struct timer_list timer;
 	struct pci_dev *pdev;
 	struct pci_dev *pdev;
 	struct r6040_descriptor *rx_insert_ptr;
 	struct r6040_descriptor *rx_insert_ptr;
 	struct r6040_descriptor *rx_remove_ptr;
 	struct r6040_descriptor *rx_remove_ptr;
@@ -189,13 +189,15 @@ struct r6040_private {
 	struct r6040_descriptor *tx_ring;
 	struct r6040_descriptor *tx_ring;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
 	dma_addr_t tx_ring_dma;
-	u16	tx_free_desc, phy_addr, phy_mode;
+	u16	tx_free_desc, phy_addr;
 	u16	mcr0, mcr1;
 	u16	mcr0, mcr1;
-	u16	switch_sig;
 	struct net_device *dev;
 	struct net_device *dev;
-	struct mii_if_info mii_if;
+	struct mii_bus *mii_bus;
 	struct napi_struct napi;
 	struct napi_struct napi;
 	void __iomem *base;
 	void __iomem *base;
+	struct phy_device *phydev;
+	int old_link;
+	int old_duplex;
 };
 };
 
 
 static char version[] __devinitdata = KERN_INFO DRV_NAME
 static char version[] __devinitdata = KERN_INFO DRV_NAME
@@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
 	}
 	}
 }
 }
 
 
-static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg)
+static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
 {
 {
+	struct net_device *dev = bus->priv;
 	struct r6040_private *lp = netdev_priv(dev);
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	void __iomem *ioaddr = lp->base;
 
 
-	return (r6040_phy_read(ioaddr, lp->phy_addr, reg));
+	return r6040_phy_read(ioaddr, phy_addr, reg);
 }
 }
 
 
-static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
+						int reg, u16 value)
 {
 {
+	struct net_device *dev = bus->priv;
 	struct r6040_private *lp = netdev_priv(dev);
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	void __iomem *ioaddr = lp->base;
 
 
-	r6040_phy_write(ioaddr, lp->phy_addr, reg, val);
+	r6040_phy_write(ioaddr, phy_addr, reg, value);
+
+	return 0;
+}
+
+static int r6040_mdiobus_reset(struct mii_bus *bus)
+{
+	return 0;
 }
 }
 
 
 static void r6040_free_txbufs(struct net_device *dev)
 static void r6040_free_txbufs(struct net_device *dev)
@@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev)
 	void __iomem *ioaddr = priv->base;
 	void __iomem *ioaddr = priv->base;
 
 
 	netdev_warn(dev, "transmit timed out, int enable %4.4x "
 	netdev_warn(dev, "transmit timed out, int enable %4.4x "
-		"status %4.4x, PHY status %4.4x\n",
+		"status %4.4x\n",
 		ioread16(ioaddr + MIER),
 		ioread16(ioaddr + MIER),
-		ioread16(ioaddr + MISR),
-		r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
+		ioread16(ioaddr + MISR));
 
 
 	dev->stats.tx_errors++;
 	dev->stats.tx_errors++;
 
 
@@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev)
 	struct r6040_private *lp = netdev_priv(dev);
 	struct r6040_private *lp = netdev_priv(dev);
 	struct pci_dev *pdev = lp->pdev;
 	struct pci_dev *pdev = lp->pdev;
 
 
-	/* deleted timer */
-	del_timer_sync(&lp->timer);
-
 	spin_lock_irq(&lp->lock);
 	spin_lock_irq(&lp->lock);
 	napi_disable(&lp->napi);
 	napi_disable(&lp->napi);
 	netif_stop_queue(dev);
 	netif_stop_queue(dev);
@@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev)
 	return 0;
 	return 0;
 }
 }
 
 
-/* Status of PHY CHIP */
-static int r6040_phy_mode_chk(struct net_device *dev)
-{
-	struct r6040_private *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	int phy_dat;
-
-	/* PHY Link Status Check */
-	phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
-	if (!(phy_dat & 0x4))
-		phy_dat = 0x8000;	/* Link Failed, full duplex */
-
-	/* PHY Chip Auto-Negotiation Status */
-	phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
-	if (phy_dat & 0x0020) {
-		/* Auto Negotiation Mode */
-		phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
-		phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
-		if (phy_dat & 0x140)
-			/* Force full duplex */
-			phy_dat = 0x8000;
-		else
-			phy_dat = 0;
-	} else {
-		/* Force Mode */
-		phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
-		if (phy_dat & 0x100)
-			phy_dat = 0x8000;
-		else
-			phy_dat = 0x0000;
-	}
-
-	return phy_dat;
-};
-
-static void r6040_set_carrier(struct mii_if_info *mii)
-{
-	if (r6040_phy_mode_chk(mii->dev)) {
-		/* autoneg is off: Link is always assumed to be up */
-		if (!netif_carrier_ok(mii->dev))
-			netif_carrier_on(mii->dev);
-	} else
-		r6040_phy_mode_chk(mii->dev);
-}
-
 static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 {
 	struct r6040_private *lp = netdev_priv(dev);
 	struct r6040_private *lp = netdev_priv(dev);
-	struct mii_ioctl_data *data = if_mii(rq);
-	int rc;
 
 
-	if (!netif_running(dev))
+	if (!lp->phydev)
 		return -EINVAL;
 		return -EINVAL;
-	spin_lock_irq(&lp->lock);
-	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
-	spin_unlock_irq(&lp->lock);
-	r6040_set_carrier(&lp->mii_if);
-	return rc;
+
+	return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
 }
 }
 
 
 static int r6040_rx(struct net_device *dev, int limit)
 static int r6040_rx(struct net_device *dev, int limit)
@@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	/* Read the PHY ID */
-	lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
-
-	if (lp->switch_sig  == ICPLUS_PHY_ID) {
-		r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
-		lp->phy_mode = 0x8000;
-	} else {
-		/* PHY Mode Check */
-		r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
-		r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
-
-		if (PHY_MODE == 0x3100)
-			lp->phy_mode = r6040_phy_mode_chk(dev);
-		else
-			lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
-	}
-
-	/* Set duplex mode */
-	lp->mcr0 |= lp->phy_mode;
-
 	/* improve performance (by RDC guys) */
 	/* improve performance (by RDC guys) */
 	r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
 	r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
 	r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
 	r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
@@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev)
 	return 0;
 	return 0;
 }
 }
 
 
-/*
-  A periodic timer routine
-	Polling PHY Chip Link Status
-*/
-static void r6040_timer(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct r6040_private *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	u16 phy_mode;
-
-	/* Polling PHY Chip Status */
-	if (PHY_MODE == 0x3100)
-		phy_mode = r6040_phy_mode_chk(dev);
-	else
-		phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
-
-	if (phy_mode != lp->phy_mode) {
-		lp->phy_mode = phy_mode;
-		lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
-		iowrite16(lp->mcr0, ioaddr);
-	}
-
-	/* Timer active again */
-	mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
-
-	/* Check media */
-	mii_check_media(&lp->mii_if, 1, 1);
-}
 
 
 /* Read/set MAC address routines */
 /* Read/set MAC address routines */
 static void r6040_mac_address(struct net_device *dev)
 static void r6040_mac_address(struct net_device *dev)
@@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev)
 	napi_enable(&lp->napi);
 	napi_enable(&lp->napi);
 	netif_start_queue(dev);
 	netif_start_queue(dev);
 
 
-	/* set and active a timer process */
-	setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
-	if (lp->switch_sig != ICPLUS_PHY_ID)
-		mod_timer(&lp->timer, jiffies + HZ);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev,
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 {
 	struct r6040_private *rp = netdev_priv(dev);
 	struct r6040_private *rp = netdev_priv(dev);
-	int rc;
-
-	spin_lock_irq(&rp->lock);
-	rc = mii_ethtool_gset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
 
 
-	return rc;
+	return  phy_ethtool_gset(rp->phydev, cmd);
 }
 }
 
 
 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct r6040_private *rp = netdev_priv(dev);
-	int rc;
-
-	spin_lock_irq(&rp->lock);
-	rc = mii_ethtool_sset(&rp->mii_if, cmd);
-	spin_unlock_irq(&rp->lock);
-	r6040_set_carrier(&rp->mii_if);
-
-	return rc;
-}
-
-static u32 netdev_get_link(struct net_device *dev)
 {
 {
 	struct r6040_private *rp = netdev_priv(dev);
 	struct r6040_private *rp = netdev_priv(dev);
 
 
-	return mii_link_ok(&rp->mii_if);
+	return phy_ethtool_sset(rp->phydev, cmd);
 }
 }
 
 
 static const struct ethtool_ops netdev_ethtool_ops = {
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo		= netdev_get_drvinfo,
 	.get_drvinfo		= netdev_get_drvinfo,
 	.get_settings		= netdev_get_settings,
 	.get_settings		= netdev_get_settings,
 	.set_settings		= netdev_set_settings,
 	.set_settings		= netdev_set_settings,
-	.get_link		= netdev_get_link,
+	.get_link		= ethtool_op_get_link,
 };
 };
 
 
 static const struct net_device_ops r6040_netdev_ops = {
 static const struct net_device_ops r6040_netdev_ops = {
@@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = {
 #endif
 #endif
 };
 };
 
 
+static void r6040_adjust_link(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	struct phy_device *phydev = lp->phydev;
+	int status_changed = 0;
+	void __iomem *ioaddr = lp->base;
+
+	BUG_ON(!phydev);
+
+	if (lp->old_link != phydev->link) {
+		status_changed = 1;
+		lp->old_link = phydev->link;
+	}
+
+	/* reflect duplex change */
+	if (phydev->link && (lp->old_duplex != phydev->duplex)) {
+		lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+		iowrite16(lp->mcr0, ioaddr);
+
+		status_changed = 1;
+		lp->old_duplex = phydev->duplex;
+	}
+
+	if (status_changed) {
+		pr_info("%s: link %s", dev->name, phydev->link ?
+			"UP" : "DOWN");
+		if (phydev->link)
+			pr_cont(" - %d/%s", phydev->speed,
+			DUPLEX_FULL == phydev->duplex ? "full" : "half");
+		pr_cont("\n");
+	}
+}
+
+static int r6040_mii_probe(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+
+	phydev = phy_find_first(lp->mii_bus);
+	if (!phydev) {
+		dev_err(&lp->pdev->dev, "no PHY found\n");
+		return -ENODEV;
+	}
+
+	phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
+				0, PHY_INTERFACE_MODE_MII);
+
+	if (IS_ERR(phydev)) {
+		dev_err(&lp->pdev->dev, "could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	/* mask with MAC supported features */
+	phydev->supported &= (SUPPORTED_10baseT_Half
+				| SUPPORTED_10baseT_Full
+				| SUPPORTED_100baseT_Half
+				| SUPPORTED_100baseT_Full
+				| SUPPORTED_Autoneg
+				| SUPPORTED_MII
+				| SUPPORTED_TP);
+
+	phydev->advertising = phydev->supported;
+	lp->phydev = phydev;
+	lp->old_link = 0;
+	lp->old_duplex = -1;
+
+	dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
+		"(mii_bus:phy_addr=%s)\n",
+		phydev->drv->name, dev_name(&phydev->dev));
+
+	return 0;
+}
+
 static int __devinit r6040_init_one(struct pci_dev *pdev,
 static int __devinit r6040_init_one(struct pci_dev *pdev,
 					 const struct pci_device_id *ent)
 					 const struct pci_device_id *ent)
 {
 {
@@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
 	static int card_idx = -1;
 	static int card_idx = -1;
 	int bar = 0;
 	int bar = 0;
 	u16 *adrp;
 	u16 *adrp;
+	int i;
 
 
 	printk("%s\n", version);
 	printk("%s\n", version);
 
 
@@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
 	/* Init RDC private data */
 	/* Init RDC private data */
 	lp->mcr0 = 0x1002;
 	lp->mcr0 = 0x1002;
 	lp->phy_addr = phy_table[card_idx];
 	lp->phy_addr = phy_table[card_idx];
-	lp->switch_sig = 0;
 
 
 	/* The RDC-specific entries in the device structure. */
 	/* The RDC-specific entries in the device structure. */
 	dev->netdev_ops = &r6040_netdev_ops;
 	dev->netdev_ops = &r6040_netdev_ops;
@@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
 	dev->watchdog_timeo = TX_TIMEOUT;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 
 	netif_napi_add(dev, &lp->napi, r6040_poll, 64);
 	netif_napi_add(dev, &lp->napi, r6040_poll, 64);
-	lp->mii_if.dev = dev;
-	lp->mii_if.mdio_read = r6040_mdio_read;
-	lp->mii_if.mdio_write = r6040_mdio_write;
-	lp->mii_if.phy_id = lp->phy_addr;
-	lp->mii_if.phy_id_mask = 0x1f;
-	lp->mii_if.reg_num_mask = 0x1f;
-
-	/* Check the vendor ID on the PHY, if 0xffff assume none attached */
-	if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
-		dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
-		err = -ENODEV;
+
+	lp->mii_bus = mdiobus_alloc();
+	if (!lp->mii_bus) {
+		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
 		goto err_out_unmap;
 		goto err_out_unmap;
 	}
 	}
 
 
+	lp->mii_bus->priv = dev;
+	lp->mii_bus->read = r6040_mdiobus_read;
+	lp->mii_bus->write = r6040_mdiobus_write;
+	lp->mii_bus->reset = r6040_mdiobus_reset;
+	lp->mii_bus->name = "r6040_eth_mii";
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+	lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+	if (!lp->mii_bus->irq) {
+		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+		goto err_out_mdio;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		lp->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(lp->mii_bus);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register MII bus\n");
+		goto err_out_mdio_irq;
+	}
+
+	err = r6040_mii_probe(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to probe MII bus\n");
+		goto err_out_mdio_unregister;
+	}
+
 	/* Register net device. After this dev->name assign */
 	/* Register net device. After this dev->name assign */
 	err = register_netdev(dev);
 	err = register_netdev(dev);
 	if (err) {
 	if (err) {
 		dev_err(&pdev->dev, "Failed to register net device\n");
 		dev_err(&pdev->dev, "Failed to register net device\n");
-		goto err_out_unmap;
+		goto err_out_mdio_unregister;
 	}
 	}
 	return 0;
 	return 0;
 
 
+err_out_mdio_unregister:
+	mdiobus_unregister(lp->mii_bus);
+err_out_mdio_irq:
+	kfree(lp->mii_bus->irq);
+err_out_mdio:
+	mdiobus_free(lp->mii_bus);
 err_out_unmap:
 err_out_unmap:
 	pci_iounmap(pdev, ioaddr);
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
 err_out_free_res:
@@ -1206,8 +1192,12 @@ err_out:
 static void __devexit r6040_remove_one(struct pci_dev *pdev)
 static void __devexit r6040_remove_one(struct pci_dev *pdev)
 {
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct r6040_private *lp = netdev_priv(dev);
 
 
 	unregister_netdev(dev);
 	unregister_netdev(dev);
+	mdiobus_unregister(lp->mii_bus);
+	kfree(lp->mii_bus->irq);
+	mdiobus_free(lp->mii_bus);
 	pci_release_regions(pdev);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
 	pci_disable_device(pdev);

+ 11 - 0
drivers/net/r8169.c

@@ -559,6 +559,11 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 			break;
 		udelay(25);
 	}
+	/*
+	 * According to hardware specs a 20us delay is required after write
+	 * complete indication, but before sending next command.
+	 */
+	udelay(20);
 }
 
 static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -578,6 +583,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
 		}
 		udelay(25);
 	}
+	/*
+	 * According to hardware specs a 20us delay is required after read
+	 * complete indication, but before sending next command.
+	 */
+	udelay(20);
+
 	return value;
 }
 

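Both r8169 hunks add the same thing on the read and write paths: once the chip indicates the MDIO transaction has completed, wait a further 20us before issuing the next command, as the hardware spec requires. The general shape of such an accessor (poll for completion, then a settle delay that runs whether or not the poll succeeded) is sketched below; the DEMO_* register layout is invented for illustration and is not the RTL8169's.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

#define DEMO_PHYAR	0x60		/* illustrative MDIO access register */
#define DEMO_MDIO_BUSY	0x80000000	/* illustrative busy/valid flag */

static int demo_mdio_read(void __iomem *ioaddr, int reg)
{
	int i, value = 0xffff;

	writel(DEMO_MDIO_BUSY | ((reg & 0x1f) << 16), ioaddr + DEMO_PHYAR);

	for (i = 20; i > 0; i--) {
		u32 v = readl(ioaddr + DEMO_PHYAR);

		/* Completion is signalled by the busy flag clearing. */
		if (!(v & DEMO_MDIO_BUSY)) {
			value = v & 0xffff;
			break;
		}
		udelay(25);
	}

	/* Settle time after the completion indication, before the next command. */
	udelay(20);

	return value;
}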
+ 29 - 46
drivers/net/sfc/efx.c

@@ -27,6 +27,7 @@
 #include "nic.h"
 #include "nic.h"
 
 
 #include "mcdi.h"
 #include "mcdi.h"
+#include "workarounds.h"
 
 
 /**************************************************************************
 /**************************************************************************
  *
  *
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
 
 
 #define EFX_MAX_MTU (9 * 1024)
 #define EFX_MAX_MTU (9 * 1024)
 
 
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
  * queued onto this work queue. This is not a per-nic work queue, because
  * queued onto this work queue. This is not a per-nic work queue, because
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -475,7 +469,8 @@ static void efx_init_channels(struct efx_nic *efx)
 	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
 	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
 			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
 			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
 			      efx->type->rx_buffer_padding);
 			      efx->type->rx_buffer_padding);
-	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
+	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+					 sizeof(struct efx_rx_page_state));
 
 
 	/* Initialise the channels */
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
 	efx_for_each_channel(channel, efx) {
@@ -515,11 +510,11 @@ static void efx_start_channel(struct efx_channel *channel)
 	channel->enabled = true;
 	channel->enabled = true;
 	smp_wmb();
 	smp_wmb();
 
 
-	napi_enable(&channel->napi_str);
-
-	/* Load up RX descriptors */
+	/* Fill the queues before enabling NAPI */
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_fast_push_rx_descriptors(rx_queue);
 		efx_fast_push_rx_descriptors(rx_queue);
+
+	napi_enable(&channel->napi_str);
 }
 }
 
 
 /* This disables event queue processing and packet transmission.
 /* This disables event queue processing and packet transmission.
@@ -528,8 +523,6 @@ static void efx_start_channel(struct efx_channel *channel)
  */
  */
 static void efx_stop_channel(struct efx_channel *channel)
 static void efx_stop_channel(struct efx_channel *channel)
 {
 {
-	struct efx_rx_queue *rx_queue;
-
 	if (!channel->enabled)
 	if (!channel->enabled)
 		return;
 		return;
 
 
@@ -537,12 +530,6 @@ static void efx_stop_channel(struct efx_channel *channel)
 
 
 	channel->enabled = false;
 	channel->enabled = false;
 	napi_disable(&channel->napi_str);
 	napi_disable(&channel->napi_str);
-
-	/* Ensure that any worker threads have exited or will be no-ops */
-	efx_for_each_channel_rx_queue(rx_queue, channel) {
-		spin_lock_bh(&rx_queue->add_lock);
-		spin_unlock_bh(&rx_queue->add_lock);
-	}
 }
 }
 
 
 static void efx_fini_channels(struct efx_nic *efx)
 static void efx_fini_channels(struct efx_nic *efx)
@@ -556,10 +543,18 @@ static void efx_fini_channels(struct efx_nic *efx)
 	BUG_ON(efx->port_enabled);
 	BUG_ON(efx->port_enabled);
 
 
 	rc = efx_nic_flush_queues(efx);
 	rc = efx_nic_flush_queues(efx);
-	if (rc)
+	if (rc && EFX_WORKAROUND_7803(efx)) {
+		/* Schedule a reset to recover from the flush failure. The
+		 * descriptor caches reference memory we're about to free,
+		 * but falcon_reconfigure_mac_wrapper() won't reconnect
+		 * the MACs because of the pending reset. */
+		EFX_ERR(efx, "Resetting to recover from flush failure\n");
+		efx_schedule_reset(efx, RESET_TYPE_ALL);
+	} else if (rc) {
 		EFX_ERR(efx, "failed to flush queues\n");
 		EFX_ERR(efx, "failed to flush queues\n");
-	else
+	} else {
 		EFX_LOG(efx, "successfully flushed all queues\n");
 		EFX_LOG(efx, "successfully flushed all queues\n");
+	}
 
 
 	efx_for_each_channel(channel, efx) {
 	efx_for_each_channel(channel, efx) {
 		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
 		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
@@ -586,9 +581,9 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_remove_eventq(channel);
 	efx_remove_eventq(channel);
 }
 }
 
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
 {
-	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 }
 
 
 /**************************************************************************
 /**************************************************************************
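efx_schedule_slow_fill() now arms a 100 ms one-shot timer instead of queueing delayed work, which is why the refill workqueue above could be deleted. The setup_timer()/mod_timer()/del_timer_sync() lifecycle this relies on, on kernels of this vintage, reduces to the hedged sketch below (a standalone illustration, not the driver's code).

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_rx_queue {
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
};

static void demo_rx_slow_fill(unsigned long context)
{
	struct demo_rx_queue *rxq = (struct demo_rx_queue *)context;

	/* Runs in timer (softirq) context: just poke the refill machinery. */
	rxq->slow_fill_count++;
}

static void demo_rx_queue_init(struct demo_rx_queue *rxq)
{
	setup_timer(&rxq->slow_fill, demo_rx_slow_fill, (unsigned long)rxq);
}

static void demo_schedule_slow_fill(struct demo_rx_queue *rxq)
{
	/* Re-arming an already pending timer just pushes the deadline out. */
	mod_timer(&rxq->slow_fill, jiffies + msecs_to_jiffies(100));
}

static void demo_rx_queue_fini(struct demo_rx_queue *rxq)
{
	/* Must not return while the handler may still be running. */
	del_timer_sync(&rxq->slow_fill);
}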
@@ -1233,15 +1228,8 @@ static void efx_start_all(struct efx_nic *efx)
  * since we're holding the rtnl_lock at this point. */
  * since we're holding the rtnl_lock at this point. */
 static void efx_flush_all(struct efx_nic *efx)
 static void efx_flush_all(struct efx_nic *efx)
 {
 {
-	struct efx_rx_queue *rx_queue;
-
 	/* Make sure the hardware monitor is stopped */
 	/* Make sure the hardware monitor is stopped */
 	cancel_delayed_work_sync(&efx->monitor_work);
 	cancel_delayed_work_sync(&efx->monitor_work);
-
-	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx)
-		cancel_delayed_work_sync(&rx_queue->work);
-
 	/* Stop scheduled port reconfigurations */
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
 	cancel_work_sync(&efx->mac_work);
 }
 }
@@ -1504,11 +1492,11 @@ static int efx_net_stop(struct net_device *net_dev)
 }
 }
 
 
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev)
 {
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
-	struct net_device_stats *stats = &net_dev->stats;
+	struct rtnl_link_stats64 *stats = &net_dev->stats64;
 
 
 	spin_lock_bh(&efx->stats_lock);
 	spin_lock_bh(&efx->stats_lock);
 	efx->type->update_stats(efx);
 	efx->type->update_stats(efx);
@@ -1530,11 +1518,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 	stats->tx_window_errors = mac_stats->tx_late_collision;
 	stats->tx_window_errors = mac_stats->tx_late_collision;
 
 
 	stats->rx_errors = (stats->rx_length_errors +
 	stats->rx_errors = (stats->rx_length_errors +
-			    stats->rx_over_errors +
 			    stats->rx_crc_errors +
 			    stats->rx_crc_errors +
 			    stats->rx_frame_errors +
 			    stats->rx_frame_errors +
-			    stats->rx_fifo_errors +
-			    stats->rx_missed_errors +
 			    mac_stats->rx_symbol_error);
 			    mac_stats->rx_symbol_error);
 	stats->tx_errors = (stats->tx_window_errors +
 	stats->tx_errors = (stats->tx_window_errors +
 			    mac_stats->tx_bad);
 			    mac_stats->tx_bad);
@@ -1645,7 +1630,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 static const struct net_device_ops efx_netdev_ops = {
 static const struct net_device_ops efx_netdev_ops = {
 	.ndo_open		= efx_net_open,
 	.ndo_open		= efx_net_open,
 	.ndo_stop		= efx_net_stop,
 	.ndo_stop		= efx_net_stop,
-	.ndo_get_stats		= efx_net_stats,
+	.ndo_get_stats64	= efx_net_stats,
 	.ndo_tx_timeout		= efx_watchdog,
 	.ndo_tx_timeout		= efx_watchdog,
 	.ndo_start_xmit		= efx_hard_start_xmit,
 	.ndo_start_xmit		= efx_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -1886,6 +1871,9 @@ static void efx_reset_work(struct work_struct *data)
 {
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
 
 
+	if (efx->reset_pending == RESET_TYPE_NONE)
+		return;
+
 	/* If we're not RUNNING then don't reset. Leave the reset_pending
 	/* If we're not RUNNING then don't reset. Leave the reset_pending
 	 * flag set so that efx_pci_probe_main will be retried */
 	 * flag set so that efx_pci_probe_main will be retried */
 	if (efx->state != STATE_RUNNING) {
 	if (efx->state != STATE_RUNNING) {
@@ -2052,8 +2040,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		rx_queue->queue = i;
 		rx_queue->queue = i;
 		rx_queue->channel = &efx->channel[0]; /* for safety */
 		rx_queue->channel = &efx->channel[0]; /* for safety */
 		rx_queue->buffer = NULL;
 		rx_queue->buffer = NULL;
-		spin_lock_init(&rx_queue->add_lock);
-		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+		setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+			    (unsigned long)rx_queue);
 	}
 	}
 
 
 	efx->type = type;
 	efx->type = type;
@@ -2332,6 +2320,9 @@ static int efx_pm_thaw(struct device *dev)
 
 
 	efx->type->resume_wol(efx);
 	efx->type->resume_wol(efx);
 
 
+	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+	queue_work(reset_workqueue, &efx->reset_work);
+
 	return 0;
 	return 0;
 }
 }
 
 
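Taken together, the two hunks above give a quench-and-replay scheme for resets across suspend: efx_reset_work() returns immediately when reset_pending is RESET_TYPE_NONE, so a reset requested while frozen only leaves the flag set, and efx_pm_thaw() requeues the work item to replay it. A minimal sketch of that pattern follows; it uses schedule_work() as a stand-in for the driver's private reset workqueue, and the demo_* names are illustrative.

#include <linux/kernel.h>
#include <linux/workqueue.h>

enum demo_reset { DEMO_RESET_NONE, DEMO_RESET_ALL };

struct demo_nic {
	struct work_struct reset_work;
	enum demo_reset reset_pending;
};

static void demo_reset_work(struct work_struct *work)
{
	struct demo_nic *nic = container_of(work, struct demo_nic, reset_work);

	/* Spurious or replayed queueing with nothing pending is a no-op. */
	if (nic->reset_pending == DEMO_RESET_NONE)
		return;

	/* ... perform the reset, then clear the pending flag ... */
	nic->reset_pending = DEMO_RESET_NONE;
}

static int demo_pm_thaw(struct demo_nic *nic)
{
	/* Replay any reset that was requested while the device was frozen. */
	schedule_work(&nic->reset_work);
	return 0;
}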
@@ -2421,11 +2412,6 @@ static int __init efx_init_module(void)
 	if (rc)
 	if (rc)
 		goto err_notifier;
 		goto err_notifier;
 
 
-	refill_workqueue = create_workqueue("sfc_refill");
-	if (!refill_workqueue) {
-		rc = -ENOMEM;
-		goto err_refill;
-	}
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
 	if (!reset_workqueue) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
@@ -2441,8 +2427,6 @@ static int __init efx_init_module(void)
  err_pci:
  err_pci:
 	destroy_workqueue(reset_workqueue);
 	destroy_workqueue(reset_workqueue);
  err_reset:
  err_reset:
-	destroy_workqueue(refill_workqueue);
- err_refill:
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
  err_notifier:
 	return rc;
 	return rc;
@@ -2454,7 +2438,6 @@ static void __exit efx_exit_module(void)
 
 
 	pci_unregister_driver(&efx_pci_driver);
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
 	destroy_workqueue(reset_workqueue);
-	destroy_workqueue(refill_workqueue);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 
 }
 }

+ 2 - 2
drivers/net/sfc/efx.h

@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_rx_strategy(struct efx_channel *channel);
 extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
 extern void __efx_rx_packet(struct efx_channel *channel,
 			    struct efx_rx_buffer *rx_buf, bool checksummed);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_SIZE 1024
 #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 

+ 5 - 3
drivers/net/sfc/falcon.c

@@ -548,7 +548,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	struct efx_link_state *link_state = &efx->link_state;
 	efx_oword_t reg;
-	int link_speed;
+	int link_speed, isolate;
+
+	isolate = (efx->reset_pending != RESET_TYPE_NONE);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
@@ -570,7 +572,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	 * discarded. */
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
-				    !link_state->up);
+				    !link_state->up || isolate);
 	}
 
 	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +586,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
 	/* Unisolate the MAC -> RX */
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
-		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
 	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 }
 

+ 11 - 10
drivers/net/sfc/mcdi_phy.c

@@ -20,7 +20,7 @@
 #include "nic.h"
 #include "nic.h"
 #include "selftest.h"
 #include "selftest.h"
 
 
-struct efx_mcdi_phy_cfg {
+struct efx_mcdi_phy_data {
 	u32 flags;
 	u32 flags;
 	u32 type;
 	u32 type;
 	u32 supported_cap;
 	u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
 };
 };
 
 
 static int
 static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
 {
 {
 	u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
 	u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
 	size_t outlen;
 	size_t outlen;
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
 
 
 static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
 static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	enum efx_phy_mode mode, supported;
 	enum efx_phy_mode mode, supported;
 	u32 flags;
 	u32 flags;
 
 
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
 
 
 static int efx_mcdi_phy_probe(struct efx_nic *efx)
 static int efx_mcdi_phy_probe(struct efx_nic *efx)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_data;
+	struct efx_mcdi_phy_data *phy_data;
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	u32 caps;
 	u32 caps;
 	int rc;
 	int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
 	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
 	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
 	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
 	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
 		efx->wanted_fc |= EFX_FC_AUTO;
 		efx->wanted_fc |= EFX_FC_AUTO;
+	efx_link_set_wanted_fc(efx, efx->wanted_fc);
 
 
 	return 0;
 	return 0;
 
 
@@ -405,7 +406,7 @@ fail:
 
 
 int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
 int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps = (efx->link_advertising ?
 	u32 caps = (efx->link_advertising ?
 		    ethtool_to_mcdi_cap(efx->link_advertising) :
 		    ethtool_to_mcdi_cap(efx->link_advertising) :
 		    phy_cfg->forced_cap);
 		    phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
  */
  */
 void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
 void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 rmtadv;
 	u32 rmtadv;
 
 
 	/* The link partner capabilities are only relevent if the
 	/* The link partner capabilities are only relevent if the
@@ -505,7 +506,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
 
 
 static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
 	int rc;
 	int rc;
 
 
@@ -535,7 +536,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
 
 
 static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps;
 	u32 caps;
 	int rc;
 	int rc;
 
 
@@ -674,7 +675,7 @@ out:
 static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
 static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
 				  unsigned flags)
 				  unsigned flags)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 mode;
 	u32 mode;
 	int rc;
 	int rc;
 
 
@@ -712,7 +713,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
 
 
 const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
 const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
 {
 {
-	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 
 
 	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
 	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
 		if (index == 0)
 		if (index == 0)

+ 26 - 20
drivers/net/sfc/net_driver.h

@@ -18,6 +18,7 @@
 #include <linux/etherdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/if_vlan.h>
+#include <linux/timer.h>
 #include <linux/mdio.h>
 #include <linux/mdio.h>
 #include <linux/list.h>
 #include <linux/list.h>
 #include <linux/pci.h>
 #include <linux/pci.h>
@@ -221,7 +222,6 @@ struct efx_tx_queue {
  *	If both this and skb are %NULL, the buffer slot is currently free.
  *	If both this and skb are %NULL, the buffer slot is currently free.
  * @data: Pointer to ethernet header
  * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
  * @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
  */
  */
 struct efx_rx_buffer {
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
 	dma_addr_t dma_addr;
@@ -229,7 +229,24 @@ struct efx_rx_buffer {
 	struct page *page;
 	struct page *page;
 	char *data;
 	char *data;
 	unsigned int len;
 	unsigned int len;
-	dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ *	When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+	unsigned refcnt;
+	dma_addr_t dma_addr;
+
+	unsigned int __pad[0] ____cacheline_aligned;
 };
 };
 
 
 /**
 /**
@@ -242,10 +259,6 @@ struct efx_rx_buffer {
  * @added_count: Number of buffers added to the receive queue.
  * @added_count: Number of buffers added to the receive queue.
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @removed_count: Number of buffers removed from the receive queue.
  * @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- *	This lock must be held in order to add buffers to the RX
- *	descriptor ring (rxd and buffer) and to update added_count (but
- *	not removed_count).
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
  *	(<= @max_fill)
@@ -259,12 +272,7 @@ struct efx_rx_buffer {
  *	overflow was observed.  It should never be set.
  *	overflow was observed.  It should never be set.
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
- * @buf_page: Page for next RX buffer.
- *	We can use a single page for multiple RX buffers. This tracks
- *	the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
  * @flushed: Use when handling queue flushing
  * @flushed: Use when handling queue flushing
  */
  */
 struct efx_rx_queue {
 struct efx_rx_queue {
@@ -277,7 +285,6 @@ struct efx_rx_queue {
 	int added_count;
 	int added_count;
 	int notified_count;
 	int notified_count;
 	int removed_count;
 	int removed_count;
-	spinlock_t add_lock;
 	unsigned int max_fill;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
 	unsigned int fast_fill_trigger;
 	unsigned int fast_fill_limit;
 	unsigned int fast_fill_limit;
@@ -285,12 +292,9 @@ struct efx_rx_queue {
 	unsigned int min_overfill;
 	unsigned int min_overfill;
 	unsigned int alloc_page_count;
 	unsigned int alloc_page_count;
 	unsigned int alloc_skb_count;
 	unsigned int alloc_skb_count;
-	struct delayed_work work;
+	struct timer_list slow_fill;
 	unsigned int slow_fill_count;
 	unsigned int slow_fill_count;
 
 
-	struct page *buf_page;
-	dma_addr_t buf_dma_addr;
-	char *buf_data;
 	enum efx_flush_state flushed;
 	enum efx_flush_state flushed;
 };
 };
 
 
@@ -336,7 +340,7 @@ enum efx_rx_alloc_method {
  * @eventq: Event queue buffer
  * @eventq: Event queue buffer
  * @eventq_read_ptr: Event queue read pointer
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @eventq_magic: Event queue magic value for driver-generated test events
+ * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +371,7 @@ struct efx_channel {
 	struct efx_special_buffer eventq;
 	struct efx_special_buffer eventq;
 	unsigned int eventq_read_ptr;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int eventq_magic;
+	unsigned int magic_count;
 
 
 	unsigned int irq_count;
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
 	unsigned int irq_mod_score;
@@ -645,6 +649,7 @@ union efx_multicast_hash {
  * struct efx_nic - an Efx NIC
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
  * @name: Device name (net device name or bus id before net device registered)
  * @pci_dev: The PCI device
  * @pci_dev: The PCI device
+ * @port_num: Index of this host port within the controller
  * @type: Controller type attributes
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
  * @legacy_irq: IRQ number
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -728,6 +733,7 @@ union efx_multicast_hash {
 struct efx_nic {
 struct efx_nic {
 	char name[IFNAMSIZ];
 	char name[IFNAMSIZ];
 	struct pci_dev *pci_dev;
 	struct pci_dev *pci_dev;
+	unsigned port_num;
 	const struct efx_nic_type *type;
 	const struct efx_nic_type *type;
 	int legacy_irq;
 	int legacy_irq;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *workqueue;
@@ -830,7 +836,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
 
 
 static inline unsigned int efx_port_num(struct efx_nic *efx)
 static inline unsigned int efx_port_num(struct efx_nic *efx)
 {
 {
-	return PCI_FUNC(efx->pci_dev->devfn);
+	return efx->net_dev->dev_id;
 }
 }
 
 
 /**
 /**

+ 42 - 13
drivers/net/sfc/nic.c

@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 /* Depth of RX flush request fifo */
 /* Depth of RX flush request fifo */
 #define EFX_RX_FLUSH_COUNT 4
 #define EFX_RX_FLUSH_COUNT 4
 
 
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
+	(0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
+	(0x00010200 + (_channel)->channel)
+
 /**************************************************************************
 /**************************************************************************
  *
  *
  * Solarstorm hardware access
  * Solarstorm hardware access
@@ -850,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 		      checksummed, discard);
 		      checksummed, discard);
 }
 }
 
 
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned code;
+
+	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+		++channel->magic_count;
+	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+		/* The queue must be empty, so we won't receive any rx
+		 * events, so efx_process_channel() won't refill the
+		 * queue. Refill it here */
+		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+	else
+		EFX_LOG(efx, "channel %d received generated "
+			"event "EFX_QWORD_FMT"\n", channel->channel,
+			EFX_QWORD_VAL(*event));
+}
+
 /* Global events are basically PHY events */
 /* Global events are basically PHY events */
 static void
 static void
 efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
 efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
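The driver-generated event path above now encodes which kind of self-injected event arrived by reserving disjoint magic ranges: 0x00010100 + channel for self-test events and 0x00010200 + channel for RX refill events, with efx_handle_generated_event() decoding them back into a counter bump or a descriptor push. The encoding is plain arithmetic, as the small standalone C example below shows (the demo_* names are not the driver's).

#include <stdio.h>

#define DEMO_MAGIC_TEST(chan)	(0x00010100 + (chan))
#define DEMO_MAGIC_FILL(chan)	(0x00010200 + (chan))

/* Returns what the event handler would do for a given magic code. */
static const char *demo_decode(unsigned code, int chan, unsigned *magic_count)
{
	if (code == DEMO_MAGIC_TEST(chan)) {
		++*magic_count;		/* self-test: just count arrivals */
		return "test event";
	}
	if (code == DEMO_MAGIC_FILL(chan))
		return "fill event -> refill the RX queue from NAPI context";
	return "unknown driver-generated event";
}

int main(void)
{
	unsigned magic_count = 0;
	int chan = 3;

	printf("%s\n", demo_decode(DEMO_MAGIC_TEST(chan), chan, &magic_count));
	printf("%s\n", demo_decode(DEMO_MAGIC_FILL(chan), chan, &magic_count));
	printf("magic_count = %u\n", magic_count);
	return 0;
}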
@@ -993,11 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 			}
 			}
 			break;
 			break;
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
-			channel->eventq_magic = EFX_QWORD_FIELD(
-				event, FSF_AZ_DRV_GEN_EV_MAGIC);
-			EFX_LOG(channel->efx, "channel %d received generated "
-				"event "EFX_QWORD_FMT"\n", channel->channel,
-				EFX_QWORD_VAL(event));
+			efx_handle_generated_event(channel, &event);
 			break;
 			break;
 		case FSE_AZ_EV_CODE_GLOBAL_EV:
 		case FSE_AZ_EV_CODE_GLOBAL_EV:
 			efx_handle_global_event(channel, &event);
 			efx_handle_global_event(channel, &event);
@@ -1088,12 +1112,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
 }
 }
 
 
 
 
-/* Generates a test event on the event queue.  A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel)
 {
 {
+	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+	efx_qword_t test_event;
+
+	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_DRV_GEN_EV,
+			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+	efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
 	efx_qword_t test_event;
 	efx_qword_t test_event;
 
 
 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1219,9 +1251,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 		rx_queue->flushed = FLUSH_DONE;
 		rx_queue->flushed = FLUSH_DONE;
 	}
 	}
 
 
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
 	return -ETIMEDOUT;
 	return -ETIMEDOUT;
 }
 }
 
 

+ 2 - 2
drivers/net/sfc/nic.h

@@ -190,8 +190,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
 extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel,
-					unsigned int magic);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
 extern void efx_nic_generate_interrupt(struct efx_nic *efx);
 extern void efx_nic_disable_interrupts(struct efx_nic *efx);
 extern void efx_nic_fini_interrupt(struct efx_nic *efx);

+ 186 - 207
drivers/net/sfc/rx.c

@@ -25,6 +25,9 @@
 /* Number of RX descriptors pushed at once. */
 /* Number of RX descriptors pushed at once. */
 #define EFX_RX_BATCH  8
 #define EFX_RX_BATCH  8
 
 
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
 /* Size of buffer allocated for skb header area. */
 /* Size of buffer allocated for skb header area. */
 #define EFX_SKB_HEADERS  64u
 #define EFX_SKB_HEADERS  64u
 
 
@@ -98,155 +101,138 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 	return PAGE_SIZE << efx->rx_buffer_order;
 	return PAGE_SIZE << efx->rx_buffer_order;
 }
 }
 
 
-
 /**
 /**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
  *
  *
  * @rx_queue:		Efx RX queue
  * @rx_queue:		Efx RX queue
- * @rx_buf:		RX buffer structure to populate
  *
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.  Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
  */
  */
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-				  struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 {
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_rx_buffer *rx_buf;
 	int skb_len = efx->rx_buffer_len;
 	int skb_len = efx->rx_buffer_len;
+	unsigned index, count;
 
 
-	rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-	if (unlikely(!rx_buf->skb))
-		return -ENOMEM;
+	for (count = 0; count < EFX_RX_BATCH; ++count) {
+		index = rx_queue->added_count & EFX_RXQ_MASK;
+		rx_buf = efx_rx_buffer(rx_queue, index);
 
 
-	/* Adjust the SKB for padding and checksum */
-	skb_reserve(rx_buf->skb, NET_IP_ALIGN);
-	rx_buf->len = skb_len - NET_IP_ALIGN;
-	rx_buf->data = (char *)rx_buf->skb->data;
-	rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+		if (unlikely(!rx_buf->skb))
+			return -ENOMEM;
+		rx_buf->page = NULL;
 
 
-	rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-					  rx_buf->data, rx_buf->len,
-					  PCI_DMA_FROMDEVICE);
+		/* Adjust the SKB for padding and checksum */
+		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+		rx_buf->len = skb_len - NET_IP_ALIGN;
+		rx_buf->data = (char *)rx_buf->skb->data;
+		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+						  rx_buf->data, rx_buf->len,
+						  PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+						   rx_buf->dma_addr))) {
+			dev_kfree_skb_any(rx_buf->skb);
+			rx_buf->skb = NULL;
+			return -EIO;
+		}
 
 
-	if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
-		dev_kfree_skb_any(rx_buf->skb);
-		rx_buf->skb = NULL;
-		return -EIO;
+		++rx_queue->added_count;
+		++rx_queue->alloc_skb_count;
 	}
 	}
 
 
 	return 0;
 	return 0;
 }
 }
 
 
 /**
 /**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
  *
  *
  * @rx_queue:		Efx RX queue
  * @rx_queue:		Efx RX queue
- * @rx_buf:		RX buffer structure to populate
  *
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.  Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates struct efx_rx_buffers for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at at all.
  */
  */
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-				   struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 {
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_nic *efx = rx_queue->efx;
-	int bytes, space, offset;
-
-	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-
-	/* If there is space left in the previously allocated page,
-	 * then use it. Otherwise allocate a new one */
-	rx_buf->page = rx_queue->buf_page;
-	if (rx_buf->page == NULL) {
-		dma_addr_t dma_addr;
-
-		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
-					   efx->rx_buffer_order);
-		if (unlikely(rx_buf->page == NULL))
+	struct efx_rx_buffer *rx_buf;
+	struct page *page;
+	void *page_addr;
+	struct efx_rx_page_state *state;
+	dma_addr_t dma_addr;
+	unsigned index, count;
+
+	/* We can split a page between two buffers */
+	BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
+	for (count = 0; count < EFX_RX_BATCH; ++count) {
+		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+				   efx->rx_buffer_order);
+		if (unlikely(page == NULL))
 			return -ENOMEM;
 			return -ENOMEM;
-
-		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-					0, efx_rx_buf_size(efx),
+		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+					efx_rx_buf_size(efx),
 					PCI_DMA_FROMDEVICE);
 					PCI_DMA_FROMDEVICE);
-
 		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
 		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
-			__free_pages(rx_buf->page, efx->rx_buffer_order);
-			rx_buf->page = NULL;
+			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 			return -EIO;
 		}
 		}
-
-		rx_queue->buf_page = rx_buf->page;
-		rx_queue->buf_dma_addr = dma_addr;
-		rx_queue->buf_data = (page_address(rx_buf->page) +
-				      EFX_PAGE_IP_ALIGN);
-	}
-
-	rx_buf->len = bytes;
-	rx_buf->data = rx_queue->buf_data;
-	offset = efx_rx_buf_offset(rx_buf);
-	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
-
-	/* Try to pack multiple buffers per page */
-	if (efx->rx_buffer_order == 0) {
-		/* The next buffer starts on the next 512 byte boundary */
-		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
-		offset += ((bytes + 0x1ff) & ~0x1ff);
-
-		space = efx_rx_buf_size(efx) - offset;
-		if (space >= bytes) {
-			/* Refs dropped on kernel releasing each skb */
-			get_page(rx_queue->buf_page);
-			goto out;
+		page_addr = page_address(page);
+		state = page_addr;
+		state->refcnt = 0;
+		state->dma_addr = dma_addr;
+
+		page_addr += sizeof(struct efx_rx_page_state);
+		dma_addr += sizeof(struct efx_rx_page_state);
+
+	split:
+		index = rx_queue->added_count & EFX_RXQ_MASK;
+		rx_buf = efx_rx_buffer(rx_queue, index);
+		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+		rx_buf->skb = NULL;
+		rx_buf->page = page;
+		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+		++rx_queue->added_count;
+		++rx_queue->alloc_page_count;
+		++state->refcnt;
+
+		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+			/* Use the second half of the page */
+			get_page(page);
+			dma_addr += (PAGE_SIZE >> 1);
+			page_addr += (PAGE_SIZE >> 1);
+			++count;
+			goto split;
 		}
 		}
 	}
 	}
 
 
-	/* This is the final RX buffer for this page, so mark it for
-	 * unmapping */
-	rx_queue->buf_page = NULL;
-	rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
 	return 0;
 	return 0;
 }
 }
 
 
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-			      struct efx_rx_buffer *new_rx_buf)
-{
-	int rc = 0;
-
-	if (rx_queue->channel->rx_alloc_push_pages) {
-		new_rx_buf->skb = NULL;
-		rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
-		rx_queue->alloc_page_count++;
-	} else {
-		new_rx_buf->page = NULL;
-		rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
-		rx_queue->alloc_skb_count++;
-	}
-
-	if (unlikely(rc < 0))
-		EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
-			   rx_queue->queue, rc);
-	return rc;
-}
-
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 				struct efx_rx_buffer *rx_buf)
 {
 {
 	if (rx_buf->page) {
 	if (rx_buf->page) {
+		struct efx_rx_page_state *state;
+
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
-		if (rx_buf->unmap_addr) {
-			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+		state = page_address(rx_buf->page);
+		if (--state->refcnt == 0) {
+			pci_unmap_page(efx->pci_dev,
+				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
 				       PCI_DMA_FROMDEVICE);
-			rx_buf->unmap_addr = 0;
 		}
 		}
 	} else if (likely(rx_buf->skb)) {
 	} else if (likely(rx_buf->skb)) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
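efx_init_rx_buffers_page() above carves each freshly mapped page into at most two receive buffers: the shared efx_rx_page_state header occupies the start of the page, the first buffer begins just after it (plus the IP alignment pad), and the second, taken only when a buffer fits in half a page minus the header, begins exactly half a page later, with get_page() and state->refcnt accounting for both users. The offset arithmetic is illustrated by this standalone sketch, which assumes 4 KiB pages and a 2-byte alignment pad; the real header is additionally padded out to a cache line.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096u	/* assumption: 4 KiB pages */
#define IP_ALIGN	2u	/* assumption: NET_IP_ALIGN-style pad */

struct demo_page_state {
	unsigned refcnt;		/* buffers still referencing this page */
	unsigned long long dma_addr;	/* DMA address of the whole page */
};

int main(void)
{
	size_t hdr = sizeof(struct demo_page_state);
	size_t buf_len = 1792;		/* example buffer length */
	unsigned refcnt = 0;

	/* First buffer: just past the shared state header. */
	size_t off0 = hdr + IP_ALIGN;
	refcnt++;
	printf("buffer 0 at offset %zu\n", off0);

	/* Second buffer only if a buffer fits in half a page minus the header. */
	if (buf_len <= (PAGE_SIZE >> 1) - hdr) {
		size_t off1 = off0 + (PAGE_SIZE >> 1);
		refcnt++;
		printf("buffer 1 at offset %zu\n", off1);
	}
	printf("page refcnt = %u\n", refcnt);
	return 0;
}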
@@ -273,31 +259,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 }
 
 
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+				    struct efx_rx_buffer *rx_buf)
+{
+	struct efx_rx_page_state *state = page_address(rx_buf->page);
+	struct efx_rx_buffer *new_buf;
+	unsigned fill_level, index;
+
+	/* +1 because efx_rx_packet() incremented removed_count. +1 because
+	 * we'd like to insert an additional descriptor whilst leaving
+	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
+	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+	if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+		/* We could place "state" on a list, and drain the list in
+		 * efx_fast_push_rx_descriptors(). For now, this will do. */
+		return;
+	}
+
+	++state->refcnt;
+	get_page(rx_buf->page);
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+	new_buf->skb = NULL;
+	new_buf->page = rx_buf->page;
+	new_buf->data = (void *)
+		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+	new_buf->len = rx_buf->len;
+	++rx_queue->added_count;
+}
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+				  struct efx_rx_buffer *rx_buf)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+	struct efx_rx_buffer *new_buf;
+	unsigned index;
+
+	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	    page_count(rx_buf->page) == 1)
+		efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+
+	memcpy(new_buf, rx_buf, sizeof(*new_buf));
+	rx_buf->page = NULL;
+	rx_buf->skb = NULL;
+	++rx_queue->added_count;
+}
+
 /**
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
  * @rx_queue:		RX descriptor queue
- * @retry:              Recheck the fill level
  * This will aim to fill the RX descriptor queue up to
  * This will aim to fill the RX descriptor queue up to
  * @rx_queue->@fast_fill_limit. If there is insufficient atomic
  * @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practise,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
  */
  */
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
-					  int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
 {
-	struct efx_rx_buffer *rx_buf;
-	unsigned fill_level, index;
-	int i, space, rc = 0;
+	struct efx_channel *channel = rx_queue->channel;
+	unsigned fill_level;
+	int space, rc = 0;
 
 
-	/* Calculate current fill level.  Do this outside the lock,
-	 * because most of the time we'll end up not wanting to do the
-	 * fill anyway.
-	 */
+	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
-	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
 	if (fill_level >= rx_queue->fast_fill_trigger)
-		return 0;
+		goto out;
 
 
 	/* Record minimum fill level */
 	/* Record minimum fill level */
 	if (unlikely(fill_level < rx_queue->min_fill)) {
 	if (unlikely(fill_level < rx_queue->min_fill)) {
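efx_resurrect_rx_buffer() above recovers the sibling of a half-page buffer without storing a second address: the two buffers in a page sit exactly PAGE_SIZE/2 apart, and the lower one never has that bit set in its offset, so XOR-ing the DMA address (or data pointer) with PAGE_SIZE/2 flips between them; that is what the new_buf->dma_addr ^ (PAGE_SIZE >> 1) line does. A runnable toy demonstration, assuming 4 KiB pages and a small header-plus-alignment offset:

#include <stdio.h>

#define PAGE_SIZE 4096ull	/* assumption: 4 KiB pages */

int main(void)
{
	/* Page-aligned base plus a small header/alignment offset. */
	unsigned long long page = 0x7f3200000000ull;
	unsigned long long buf0 = page + 64 + 2;		/* first half-page buffer */
	unsigned long long buf1 = buf0 ^ (PAGE_SIZE >> 1);	/* its sibling */

	printf("buf0 = %#llx\n", buf0);
	printf("buf1 = %#llx (== buf0 + PAGE_SIZE/2)\n", buf1);
	printf("flip back: %#llx\n", buf1 ^ (PAGE_SIZE >> 1));
	return 0;
}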
@@ -305,34 +344,25 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 			rx_queue->min_fill = fill_level;
 			rx_queue->min_fill = fill_level;
 	}
 	}
 
 
-	/* Acquire RX add lock.  If this lock is contended, then a fast
-	 * fill must already be in progress (e.g. in the refill
-	 * tasklet), so we don't need to do anything
-	 */
-	if (!spin_trylock_bh(&rx_queue->add_lock))
-		return -1;
-
- retry:
-	/* Recalculate current fill level now that we have the lock */
-	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
 	if (space < EFX_RX_BATCH)
-		goto out_unlock;
+		goto out;
 
 
 	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
 	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
 		  " level %d to level %d using %s allocation\n",
 		  " level %d to level %d using %s allocation\n",
 		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
 		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
-		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+		  channel->rx_alloc_push_pages ? "page" : "skb");
 
 
 	do {
 	do {
-		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = rx_queue->added_count & EFX_RXQ_MASK;
-			rx_buf = efx_rx_buffer(rx_queue, index);
-			rc = efx_init_rx_buffer(rx_queue, rx_buf);
-			if (unlikely(rc))
-				goto out;
-			++rx_queue->added_count;
+		if (channel->rx_alloc_push_pages)
+			rc = efx_init_rx_buffers_page(rx_queue);
+		else
+			rc = efx_init_rx_buffers_skb(rx_queue);
+		if (unlikely(rc)) {
+			/* Ensure that we don't leave the rx queue empty */
+			if (rx_queue->added_count == rx_queue->removed_count)
+				efx_schedule_slow_fill(rx_queue);
+			goto out;
 		}
 		}
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 
 
@@ -341,63 +371,18 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		  rx_queue->added_count - rx_queue->removed_count);
 		  rx_queue->added_count - rx_queue->removed_count);
 
 
  out:
  out:
-	/* Send write pointer to card. */
-	efx_nic_notify_rx_desc(rx_queue);
-
-	/* If the fast fill is running inside from the refill tasklet, then
-	 * for SMP systems it may be running on a different CPU to
-	 * RX event processing, which means that the fill level may now be
-	 * out of date. */
-	if (unlikely(retry && (rc == 0)))
-		goto retry;
-
- out_unlock:
-	spin_unlock_bh(&rx_queue->add_lock);
-
-	return rc;
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue:		RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
-	int rc;
-
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
-	if (unlikely(rc)) {
-		/* Schedule the work item to run immediately. The hope is
-		 * that work is immediately pending to free some memory
-		 * (e.g. an RX event or TX completion)
-		 */
-		efx_schedule_slow_fill(rx_queue, 0);
-	}
+	if (rx_queue->notified_count != rx_queue->added_count)
+		efx_nic_notify_rx_desc(rx_queue);
 }
 }
 
 
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
 {
 {
-	struct efx_rx_queue *rx_queue;
-	int rc;
-
-	rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
-	if (unlikely(!rx_queue->channel->enabled))
-		return;
-
-	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
-		  "%d\n", rx_queue->queue, raw_smp_processor_id());
+	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+	struct efx_channel *channel = rx_queue->channel;
 
 
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(channel);
 	++rx_queue->slow_fill_count;
 	++rx_queue->slow_fill_count;
-	/* Push new RX descriptors, allowing at least 1 jiffy for
-	 * the kernel to free some more memory. */
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
-	if (rc)
-		efx_schedule_slow_fill(rx_queue, 1);
 }
 }
 
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -498,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		   unsigned int len, bool checksummed, bool discard)
 		   unsigned int len, bool checksummed, bool discard)
 {
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_nic *efx = rx_queue->efx;
+	struct efx_channel *channel = rx_queue->channel;
 	struct efx_rx_buffer *rx_buf;
 	struct efx_rx_buffer *rx_buf;
 	bool leak_packet = false;
 	bool leak_packet = false;
 
 
@@ -525,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	/* Discard packet, if instructed to do so */
 	/* Discard packet, if instructed to do so */
 	if (unlikely(discard)) {
 	if (unlikely(discard)) {
 		if (unlikely(leak_packet))
 		if (unlikely(leak_packet))
-			rx_queue->channel->n_skbuff_leaks++;
+			channel->n_skbuff_leaks++;
 		else
 		else
-			/* We haven't called efx_unmap_rx_buffer yet,
-			 * so fini the entire rx_buffer here */
-			efx_fini_rx_buffer(rx_queue, rx_buf);
-		return;
+			efx_recycle_rx_buffer(channel, rx_buf);
+
+		/* Don't hold off the previous receive */
+		rx_buf = NULL;
+		goto out;
 	}
 	}
 
 
 	/* Release card resources - assumes all RX buffers consumed in-order
 	/* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	 * prefetched into cache.
 	 * prefetched into cache.
 	 */
 	 */
 	rx_buf->len = len;
 	rx_buf->len = len;
+out:
 	if (rx_queue->channel->rx_pkt)
 	if (rx_queue->channel->rx_pkt)
 		__efx_rx_packet(rx_queue->channel,
 		__efx_rx_packet(rx_queue->channel,
 				rx_queue->channel->rx_pkt,
 				rx_queue->channel->rx_pkt,
@@ -682,6 +670,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 
 	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
 
+	del_timer_sync(&rx_queue->slow_fill);
 	efx_nic_fini_rx(rx_queue);
 	efx_nic_fini_rx(rx_queue);
 
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	/* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,16 +680,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
 		}
 	}
 	}
-
-	/* For a page that is part-way through splitting into RX buffers */
-	if (rx_queue->buf_page != NULL) {
-		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       efx_rx_buf_size(rx_queue->efx),
-			       PCI_DMA_FROMDEVICE);
-		__free_pages(rx_queue->buf_page,
-			     rx_queue->efx->rx_buffer_order);
-		rx_queue->buf_page = NULL;
-	}
 }
 }
 
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)

+ 13 - 15
drivers/net/sfc/selftest.c

@@ -38,7 +38,7 @@ struct efx_loopback_payload {
 	struct udphdr udp;
 	struct udphdr udp;
 	__be16 iteration;
 	__be16 iteration;
 	const char msg[64];
 	const char msg[64];
-} __attribute__ ((packed));
+} __packed;
 
 
 /* Loopback test source MAC address */
 /* Loopback test source MAC address */
 static const unsigned char payload_source[ETH_ALEN] = {
 static const unsigned char payload_source[ETH_ALEN] = {
@@ -161,23 +161,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
 static int efx_test_eventq_irq(struct efx_channel *channel,
 static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
 			       struct efx_self_tests *tests)
 {
 {
-	unsigned int magic, count;
-
-	/* Channel specific code, limited to 20 bits */
-	magic = (0x00010150 + channel->channel);
-	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
-		channel->channel, magic);
+	unsigned int magic_count, count;
 
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
 
-	/* Reset flag and zero magic word */
+	magic_count = channel->magic_count;
 	channel->efx->last_irq_cpu = -1;
 	channel->efx->last_irq_cpu = -1;
-	channel->eventq_magic = 0;
 	smp_wmb();
 	smp_wmb();
 
 
-	efx_nic_generate_test_event(channel, magic);
+	efx_nic_generate_test_event(channel);
 
 
 	/* Wait for arrival of interrupt */
 	/* Wait for arrival of interrupt */
 	count = 0;
 	count = 0;
@@ -187,7 +181,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 		if (channel->work_pending)
 		if (channel->work_pending)
 			efx_process_channel_now(channel);
 			efx_process_channel_now(channel);
 
 
-		if (channel->eventq_magic == magic)
+		if (channel->magic_count != magic_count)
 			goto eventq_ok;
 			goto eventq_ok;
 	} while (++count < 2);
 	} while (++count < 2);
 
 
@@ -204,7 +198,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 
 
 	/* Check to see if event was received even if interrupt wasn't */
 	/* Check to see if event was received even if interrupt wasn't */
 	efx_process_channel_now(channel);
 	efx_process_channel_now(channel);
-	if (channel->eventq_magic == magic) {
+	if (channel->magic_count != magic_count) {
 		EFX_ERR(channel->efx, "channel %d event was generated, but "
 		EFX_ERR(channel->efx, "channel %d event was generated, but "
 			"failed to trigger an interrupt\n", channel->channel);
 			"failed to trigger an interrupt\n", channel->channel);
 		tests->eventq_dma[channel->channel] = 1;
 		tests->eventq_dma[channel->channel] = 1;
@@ -545,7 +539,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 static int efx_wait_for_link(struct efx_nic *efx)
 static int efx_wait_for_link(struct efx_nic *efx)
 {
 {
 	struct efx_link_state *link_state = &efx->link_state;
 	struct efx_link_state *link_state = &efx->link_state;
-	int count;
+	int count, link_up_count = 0;
 	bool link_up;
 	bool link_up;
 
 
 	for (count = 0; count < 40; count++) {
 	for (count = 0; count < 40; count++) {
@@ -567,8 +561,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
 			link_up = !efx->mac_op->check_fault(efx);
 			link_up = !efx->mac_op->check_fault(efx);
 		mutex_unlock(&efx->mac_lock);
 		mutex_unlock(&efx->mac_lock);
 
 
-		if (link_up)
-			return 0;
+		if (link_up) {
+			if (++link_up_count == 2)
+				return 0;
+		} else {
+			link_up_count = 0;
+		}
 	}
 	}
 
 
 	return -ETIMEDOUT;
 	return -ETIMEDOUT;

+ 4 - 0
drivers/net/sfc/siena.c

@@ -206,6 +206,7 @@ static int siena_probe_nic(struct efx_nic *efx)
 {
 	struct siena_nic_data *nic_data;
 	bool already_attached = 0;
+	efx_oword_t reg;
 	int rc;
 
 	/* Allocate storage for hardware specific data */
@@ -220,6 +221,9 @@ static int siena_probe_nic(struct efx_nic *efx)
 		goto fail1;
 	}
 
+	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+	efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
 	efx_mcdi_init(efx);
 
 	/* Recover from a failed assertion before probing */

Too many files were changed in this diff, so some files are not shown.