
Merge git://github.com/Jkirsher/net-next

David S. Miller 13 years ago
parent
commit
fb7a6d4e7d

+ 2 - 0
drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -128,6 +128,7 @@ struct vf_data_storage {
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 	u16 tx_rate;
+	struct pci_dev *vfdev;
 };

 struct vf_macvlans {
@@ -490,6 +491,7 @@ struct ixgbe_adapter {
 	u64 rsc_total_flush;
 	u32 wol;
 	u16 eeprom_version;
+	u16 eeprom_cap;

 	int node;
 	u32 led_reg;

+ 5 - 10
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c

@@ -759,7 +759,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	u8  analog_val;

 	/* Call adapter stop to disable tx/rx and clear interrupts */
-	hw->mac.ops.stop_adapter(hw);
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != 0)
+		goto reset_hw_out;

 	/*
 	 * Power up the Atlas Tx lanes if they are currently powered down.
@@ -802,19 +804,12 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 		phy_status = hw->phy.ops.init(hw);
 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
 			goto reset_hw_out;
-		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
-			goto no_phy_reset;
+		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+			goto mac_reset_top;

 		hw->phy.ops.reset(hw);
 	}

-no_phy_reset:
-	/*
-	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
-	 * access and verify no pending requests before reset
-	 */
-	ixgbe_disable_pcie_master(hw);
-
 mac_reset_top:
 	/*
 	 * Issue global reset to the MAC.  This needs to be a SW reset.

+ 6 - 7
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c

@@ -910,7 +910,12 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 	bool link_up = false;

 	/* Call adapter stop to disable tx/rx and clear interrupts */
-	hw->mac.ops.stop_adapter(hw);
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != 0)
+		goto reset_hw_out;
+
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);

 	/* PHY ops must be identified and initialized prior to reset */

@@ -933,12 +938,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
 		hw->phy.ops.reset(hw);

-	/*
-	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
-	 * access and verify no pending requests before reset
-	 */
-	ixgbe_disable_pcie_master(hw);
-
 mac_reset_top:
 	/*
 	 * Issue global reset to the MAC. Needs to be SW reset if link is up.

+ 84 - 61
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c

@@ -61,6 +61,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 					     u16 words, u16 *data);
 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
 						 u16 offset);
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);

 /**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -496,7 +497,6 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 {
-	u32 number_of_queues;
 	u32 reg_val;
 	u16 i;

@@ -507,35 +507,35 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 	hw->adapter_stopped = true;

 	/* Disable the receive unit */
-	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	reg_val &= ~(IXGBE_RXCTRL_RXEN);
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
-	IXGBE_WRITE_FLUSH(hw);
-	usleep_range(2000, 4000);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
-	/* Clear interrupt mask to stop from interrupts being generated */
+	/* Clear interrupt mask to stop interrupts from being generated */
 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

-	/* Clear any pending interrupts */
+	/* Clear any pending interrupts, flush previous writes */
 	IXGBE_READ_REG(hw, IXGBE_EICR);

 	/* Disable the transmit unit.  Each queue must be disabled. */
-	number_of_queues = hw->mac.max_tx_queues;
-	for (i = 0; i < number_of_queues; i++) {
-		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
-		if (reg_val & IXGBE_TXDCTL_ENABLE) {
-			reg_val &= ~IXGBE_TXDCTL_ENABLE;
-			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
-		}
+	for (i = 0; i < hw->mac.max_tx_queues; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+		reg_val &= ~IXGBE_RXDCTL_ENABLE;
+		reg_val |= IXGBE_RXDCTL_SWFLSH;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
 	}

+	/* flush all queues disables */
+	IXGBE_WRITE_FLUSH(hw);
+	usleep_range(1000, 2000);
+
 	/*
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
 	 * access and verify no pending requests
 	 */
-	ixgbe_disable_pcie_master(hw);
-
-	return 0;
+	return ixgbe_disable_pcie_master(hw);
 }

 /**
@@ -2458,75 +2458,57 @@ out:
  *  bit hasn't caused the master requests to be disabled, else 0
  *  is returned signifying master requests disabled.
  **/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	u32 i;
-	u32 reg_val;
-	u32 number_of_queues;
 	s32 status = 0;
-	u16 dev_status = 0;
+	u32 i;
+	u16 value;
+
+	/* Always set this bit to ensure any future transactions are blocked */
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
-	/* Just jump out if bus mastering is already disabled */
+	/* Exit if master requests are blocked */
 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
 		goto out;

-	/* Disable the receive unit by stopping each queue */
-	number_of_queues = hw->mac.max_rx_queues;
-	for (i = 0; i < number_of_queues; i++) {
-		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
-		if (reg_val & IXGBE_RXDCTL_ENABLE) {
-			reg_val &= ~IXGBE_RXDCTL_ENABLE;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
-		}
-	}
-
-	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	reg_val |= IXGBE_CTRL_GIO_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
-
+	/* Poll for master request bit to clear */
 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
-			goto check_device_status;
 		udelay(100);
+		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+			goto out;
 	}

+	/*
+	 * Two consecutive resets are required via CTRL.RST per datasheet
+	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+	 * of this need.  The first reset prevents new master requests from
+	 * being issued by our device.  We then must wait 1usec or more for any
+	 * remaining completions from the PCIe bus to trickle in, and then reset
+	 * again to clear out any effects they may have had on our device.
+	 */
 	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
-	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

 	/*
 	 * Before proceeding, make sure that the PCIe block does not have
 	 * transactions pending.
 	 */
-check_device_status:
 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
-							 &dev_status);
-		if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
-			break;
 		udelay(100);
+		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+							 &value);
+		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+			goto out;
 	}

-	if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
-		hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
-	else
-		goto out;
-
-	/*
-	 * Two consecutive resets are required via CTRL.RST per datasheet
-	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
-	 * of this need.  The first reset prevents new master requests from
-	 * being issued by our device.  We then must wait 1usec for any
-	 * remaining completions from the PCIe bus to trickle in, and then reset
-	 * again to clear out any effects they may have had on our device.
-	 */
-	 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

 out:
 	return status;
 }
-
 /**
  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
  *  @hw: pointer to hardware structure
@@ -3509,3 +3491,44 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 out:
 	return ret_val;
 }
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs.  This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+	u32 gcr_ext, hlreg0;
+
+	/*
+	 * If double reset is not requested then all transactions should
+	 * already be clear and as such there is no work to do
+	 */
+	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+		return;
+
+	/*
+	 * Set loopback enable to prevent any transmits from being sent
+	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
+	 * has already been cleared.
+	 */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+	/* initiate cleaning flow for buffers in the PCIe transaction layer */
+	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+	/* Flush all writes and allow 20usec for all transactions to clear */
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(20);
+
+	/* restore previous register values */
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
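
The common-code changes above reshape the whole quiesce sequence: ixgbe_stop_adapter_generic() now flushes every Tx and Rx queue itself and finishes by calling the now-static ixgbe_disable_pcie_master(), which sets CTRL.GIO_DIS before polling STATUS.GIO and, when the bit refuses to clear, records IXGBE_FLAGS_DOUBLE_RESET_REQUIRED for ixgbe_clear_tx_pending() and the reset routines to act on. Below is a minimal user-space sketch of that poll-then-flag control flow; the register stubs, values and harness are hypothetical stand-ins, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define MASTER_DISABLE_TIMEOUT 800      /* driver polls ~800 x 100us */

static int busy_polls = 3;              /* stub: GIO clears on the 4th read */
static bool double_reset_required;      /* IXGBE_FLAGS_DOUBLE_RESET_REQUIRED */

static bool gio_master_busy(void)       /* stands in for a STATUS.GIO read */
{
	return busy_polls-- > 0;
}

static int disable_pcie_master(void)
{
	int i;

	/* Always request the disable first so future requests are blocked */
	puts("write CTRL.GIO_DIS");

	if (!gio_master_busy())
		return 0;               /* already quiesced */

	for (i = 0; i < MASTER_DISABLE_TIMEOUT; i++) {
		/* udelay(100) in the driver, *before* each re-check */
		if (!gio_master_busy())
			return 0;
	}

	/* GIO never cleared: ask the reset path for two CTRL.RST cycles */
	double_reset_required = true;
	return -1;
}

int main(void)
{
	if (disable_pcie_master() == 0)
		puts(double_reset_required ? "quiesced after flagging"
					   : "quiesced cleanly");
	return 0;
}

In the real driver the failure path also re-polls the PCIe device status word (IXGBE_PCI_DEVICE_STATUS) for pending transactions before finally returning IXGBE_ERR_MASTER_REQUESTS_PENDING.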

+ 1 - 1
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h

@@ -81,7 +81,6 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
@@ -101,6 +100,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 				 u8 build, u8 ver);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);

 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);

+ 35 - 1
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c

@@ -40,7 +40,8 @@
  * hardware. The IEEE 802.1Qaz specification do not use bandwidth
  * groups so this is much simplified from the CEE case.
  */
-s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+			      __u16 *max, int max_frame)
 {
 	int min_percent = 100;
 	int min_credit, multiplier;
@@ -291,6 +292,39 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
 	return ret;
 }

+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+{
+	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+	__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+	int i;
+
+	/* naively give each TC a bwg to map onto CEE hardware */
+	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+	/* Map TSA onto CEE prio type */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			prio_type[i] = 2;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			prio_type[i] = 0;
+			break;
+		default:
+			/* Hardware only supports priority strict or
+			 * ETS transmission selection algorithms if
+			 * we receive some other value from dcbnl
+			 * throw an error
+			 */
+			return -EINVAL;
+		}
+	}
+
+	ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+	return ixgbe_dcb_hw_ets_config(hw, refill, max,
+				       bwg_id, prio_type, ets->prio_tc);
+}
+
 s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
 			    u16 *refill, u16 *max, u8 *bwg_id,
 			    u8 *prio_type, u8 *prio_tc)
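
The new ixgbe_dcb_hw_ets() above folds the IEEE-to-CEE translation into the DCB core: each traffic class's transmission selection algorithm is mapped onto the CEE priority type the hardware schedules (2 for strict priority, 0 for ETS), and anything else is rejected with -EINVAL. A standalone sketch of just that mapping, assuming the TSA constants carry the values defined by linux/dcbnl.h; the harness itself is hypothetical.

#include <stdio.h>

/* Values as defined by linux/dcbnl.h */
#define IEEE_8021QAZ_TSA_STRICT	0
#define IEEE_8021QAZ_TSA_ETS	2

/* Map an IEEE 802.1Qaz TSA value onto the CEE prio type the ixgbe
 * hardware understands; returns -1 (-EINVAL in the driver) for
 * algorithms the hardware cannot schedule. */
static int tsa_to_prio_type(unsigned char tsa, unsigned char *prio_type)
{
	switch (tsa) {
	case IEEE_8021QAZ_TSA_STRICT:
		*prio_type = 2;
		return 0;
	case IEEE_8021QAZ_TSA_ETS:
		*prio_type = 0;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	unsigned char pt;

	if (tsa_to_prio_type(IEEE_8021QAZ_TSA_ETS, &pt) == 0)
		printf("ETS -> CEE prio type %u\n", pt);
	if (tsa_to_prio_type(1 /* credit-based shaper */, &pt) < 0)
		puts("credit-based shaper rejected");
	return 0;
}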

+ 2 - 1
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h

@@ -29,6 +29,7 @@
 #ifndef _DCB_CONFIG_H_
 #define _DCB_CONFIG_H_

+#include <linux/dcbnl.h>
 #include "ixgbe_type.h"

 /* DCB data structures */
@@ -147,11 +148,11 @@ void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
 void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);

 /* DCB credits calculation */
-s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
 				   struct ixgbe_dcb_config *, int, u8);

 /* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
 s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
 			    u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
 s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);

+ 23 - 34
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c

@@ -114,6 +114,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 	u8 err = 0;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);

+	/* Fail command if not in CEE mode */
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return 1;
+
 	/* verify there is something to do, if not then exit */
 	if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 		return err;
@@ -301,6 +305,10 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	u8 up = dcb_getapp(netdev, &app);
 #endif

+	/* Fail command if not in CEE mode */
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return 1;
+
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
 				 MAX_TRAFFIC_CLASS);
 	if (ret)
@@ -537,13 +545,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
 				   struct ieee_ets *ets)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
-	__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
 	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	int i, err;
-	__u64 *p = (__u64 *) ets->prio_tc;
-	/* naively give each TC a bwg to map onto CEE hardware */
-	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+	int i;
+	__u8 max_tc = 0;

 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;
@@ -557,34 +561,21 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,

 	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));

-	/* Map TSA onto CEE prio type */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		switch (ets->tc_tsa[i]) {
-		case IEEE_8021QAZ_TSA_STRICT:
-			prio_type[i] = 2;
-			break;
-		case IEEE_8021QAZ_TSA_ETS:
-			prio_type[i] = 0;
-			break;
-		default:
-			/* Hardware only supports priority strict or
-			 * ETS transmission selection algorithms if
-			 * we receive some other value from dcbnl
-			 * throw an error
-			 */
-			return -EINVAL;
-		}
+		if (ets->prio_tc[i] > max_tc)
+			max_tc = ets->prio_tc[i];
 	}

-	if (*p)
-		ixgbe_dcbnl_set_state(dev, 1);
-	else
-		ixgbe_dcbnl_set_state(dev, 0);
+	if (max_tc)
+		max_tc++;
-	ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
-	err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
-				      bwg_id, prio_type, ets->prio_tc);
-	return err;
+	if (max_tc != netdev_get_num_tc(dev))
+		ixgbe_setup_tc(dev, max_tc);
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
+
+	return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
 }

 static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -615,7 +606,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 				   struct ieee_pfc *pfc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	int err;

 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;
@@ -628,8 +618,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 	}

 	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
-	err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
-	return err;
+	return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
 }

 #ifdef IXGBE_FCOE
@@ -740,7 +729,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 		 */
 		ixgbe_dcbnl_ieee_setets(dev, &ets);
 		ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
-		ixgbe_dcbnl_set_state(dev, 0);
+		ixgbe_setup_tc(dev, 0);
 	}

 	return 0;
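
With the credit math moved into ixgbe_dcb_hw_ets(), the rewritten ieee_setets above only has to size the netdev: the required number of traffic classes is the highest class referenced by prio_tc plus one (or zero when every priority maps to TC 0), and ixgbe_setup_tc() is invoked when that differs from the current count. A small standalone sketch of that derivation; the harness is illustrative, not driver code.

#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8

/* Traffic-class count implied by a prio -> tc map: the highest TC
 * referenced, plus one; zero when every priority maps to TC 0. */
static unsigned char ets_num_tcs(const unsigned char prio_tc[IEEE_8021QAZ_MAX_TCS])
{
	unsigned char i, max_tc = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (prio_tc[i] > max_tc)
			max_tc = prio_tc[i];

	return max_tc ? max_tc + 1 : 0;
}

int main(void)
{
	unsigned char map[IEEE_8021QAZ_MAX_TCS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

	printf("%u traffic classes needed\n", ets_num_tcs(map)); /* 4 */
	return 0;
}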

+ 13 - 0
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -1888,6 +1888,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int retval = 1;
+	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

 	/* WOL not supported except for the following */
 	switch(hw->device_id) {
@@ -1911,6 +1912,18 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 	case IXGBE_DEV_ID_82599_KX4:
 		retval = 0;
 		break;
+	case IXGBE_DEV_ID_X540T:
+		/* check eeprom to see if enabled wol */
+		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+		     (hw->bus.func == 0))) {
+			retval = 0;
+			break;
+		}
+
+		/* All others not supported */
+		wol->supported = 0;
+		break;
 	default:
 		wol->supported = 0;
 	}
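
For the X540T, this ethtool path and the probe path later in the merge gate Wake-on-LAN on the same EEPROM capability word: WoL is reported when the masked bits say both ports support it, or port 0 only while the query comes from PCI function 0. A sketch of that predicate using the IXGBE_DEVICE_CAPS_WOL_* values this merge adds to ixgbe_type.h; the standalone harness is hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Constants as added to ixgbe_type.h by this merge */
#define IXGBE_DEVICE_CAPS_WOL_PORT0_1	0x4	/* WoL on ports 0 & 1 */
#define IXGBE_DEVICE_CAPS_WOL_PORT0	0x8	/* WoL on port 0 only  */
#define IXGBE_DEVICE_CAPS_WOL_MASK	0xC

/* WoL is advertised when the EEPROM says both ports support it, or
 * port 0 only and this PCI function *is* port 0. */
static bool x540t_wol_supported(unsigned short eeprom_cap, unsigned int func)
{
	unsigned short wol_cap = eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	return wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1 ||
	       (wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0 && func == 0);
}

int main(void)
{
	printf("caps=0x4 func=1 -> %d\n", x540t_wol_supported(0x4, 1)); /* 1 */
	printf("caps=0x8 func=1 -> %d\n", x540t_wol_supported(0x8, 1)); /* 0 */
	printf("caps=0x8 func=0 -> %d\n", x540t_wol_supported(0x8, 0)); /* 1 */
	return 0;
}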

+ 35 - 106
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -134,42 +134,6 @@ MODULE_VERSION(DRV_VERSION);

 #define DEFAULT_DEBUG_LEVEL_SHIFT 3

-static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 gcr;
-	u32 gpie;
-	u32 vmdctl;
-
-#ifdef CONFIG_PCI_IOV
-	/* disable iov and allow time for transactions to clear */
-	pci_disable_sriov(adapter->pdev);
-#endif
-
-	/* turn off device IOV mode */
-	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
-	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
-	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
-	/* set default pool back to 0 */
-	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* take a breather then clean up driver data */
-	msleep(100);
-
-	kfree(adapter->vfinfo);
-	adapter->vfinfo = NULL;
-
-	adapter->num_vfs = 0;
-	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-}
-
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 {
 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -1899,10 +1863,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 		mask |= IXGBE_EIMS_GPI_SDP1;
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
+	case ixgbe_mac_X540:
+		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_MAILBOX;
 		break;
 	default:
@@ -3355,12 +3319,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	} else {
 		struct net_device *dev = adapter->netdev;

-		if (adapter->ixgbe_ieee_ets)
-			dev->dcbnl_ops->ieee_setets(dev,
-						    adapter->ixgbe_ieee_ets);
-		if (adapter->ixgbe_ieee_pfc)
-			dev->dcbnl_ops->ieee_setpfc(dev,
-						    adapter->ixgbe_ieee_pfc);
+		if (adapter->ixgbe_ieee_ets) {
+			struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+			int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+			ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+		}
+
+		if (adapter->ixgbe_ieee_pfc) {
+			struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
+
+			ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+		}
 	}

 	/* Enable RSS Hash per TC */
@@ -7064,11 +7034,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 {
 #ifdef CONFIG_PCI_IOV
 	struct ixgbe_hw *hw = &adapter->hw;
-	int err;
-	int num_vf_macvlans, i;
-	struct vf_macvlans *mv_list;
-	if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
+	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;

 	/* The 82599 supports up to 64 VFs per physical function
@@ -7077,60 +7044,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 	 * physical function
 	 */
 	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
-	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
-	if (err) {
-		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
-		goto err_novfs;
-	}
-
-	num_vf_macvlans = hw->mac.num_rar_entries -
-		(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
-
-	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
-					     sizeof(struct vf_macvlans),
-					     GFP_KERNEL);
-	if (mv_list) {
-		/* Initialize list of VF macvlans */
-		INIT_LIST_HEAD(&adapter->vf_mvs.l);
-		for (i = 0; i < num_vf_macvlans; i++) {
-			mv_list->vf = -1;
-			mv_list->free = true;
-			mv_list->rar_entry = hw->mac.num_rar_entries -
-				(i + adapter->num_vfs + 1);
-			list_add(&mv_list->l, &adapter->vf_mvs.l);
-			mv_list++;
-		}
-	}
-
-	/* If call to enable VFs succeeded then allocate memory
-	 * for per VF control structures.
-	 */
-	adapter->vfinfo =
-		kcalloc(adapter->num_vfs,
-			sizeof(struct vf_data_storage), GFP_KERNEL);
-	if (adapter->vfinfo) {
-		/* Now that we're sure SR-IOV is enabled
-		 * and memory allocated set up the mailbox parameters
-		 */
-		ixgbe_init_mbx_params_pf(hw);
-		memcpy(&hw->mbx.ops, ii->mbx_ops,
-		       sizeof(hw->mbx.ops));
-
-		/* Disable RSC when in SR-IOV mode */
-		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
-				     IXGBE_FLAG2_RSC_ENABLED);
-		return;
-	}
-
-	/* Oh oh */
-	e_err(probe, "Unable to allocate memory for VF Data Storage - "
-	      "SRIOV disabled\n");
-	pci_disable_sriov(adapter->pdev);
-
-err_novfs:
-	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-	adapter->num_vfs = 0;
+	ixgbe_enable_sriov(adapter, ii);
 #endif /* CONFIG_PCI_IOV */
 }

@@ -7160,6 +7074,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	u16 device_caps;
 #endif
 	u32 eec;
+	u16 wol_cap;

 	/* Catch broken hardware that put the wrong VF device ID in
 	 * the PCIe SR-IOV capability.
@@ -7424,6 +7339,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		netdev->features &= ~NETIF_F_RXHASH;
 	}

+	/* WOL not supported for all but the following */
+	adapter->wol = 0;
 	switch (pdev->device) {
 	case IXGBE_DEV_ID_82599_SFP:
 		/* Only this subdevice supports WOL */
@@ -7438,8 +7355,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	case IXGBE_DEV_ID_82599_KX4:
 		adapter->wol = IXGBE_WUFC_MAG;
 		break;
-	default:
-		adapter->wol = 0;
+	case IXGBE_DEV_ID_X540T:
+		/* Check eeprom to see if it is enabled */
+		hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+		wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+		     (hw->bus.func == 0)))
+			adapter->wol = IXGBE_WUFC_MAG;
 		break;
 	}
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
@@ -7580,8 +7504,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);

-	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-		ixgbe_disable_sriov(adapter);
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		if (!(ixgbe_check_vf_assignment(adapter)))
+			ixgbe_disable_sriov(adapter);
+		else
+			e_dev_warn("Unloading driver while VFs are assigned "
+				   "- VFs will not be deallocated\n");
+	}

 	ixgbe_clear_interrupt_scheme(adapter);


+ 206 - 1
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c

@@ -40,9 +40,174 @@
 #endif

 #include "ixgbe.h"
-
+#include "ixgbe_type.h"
 #include "ixgbe_sriov.h"
 #include "ixgbe_sriov.h"
 
 
+#ifdef CONFIG_PCI_IOV
+static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pvfdev;
+	u16 vf_devfn = 0;
+	int device_id;
+	int vfs_found = 0;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+		device_id = IXGBE_DEV_ID_82599_VF;
+		break;
+	case ixgbe_mac_X540:
+		device_id = IXGBE_DEV_ID_X540_VF;
+		break;
+	default:
+		device_id = 0;
+		break;
+	}
+
+	vf_devfn = pdev->devfn + 0x80;
+	pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+	while (pvfdev) {
+		if (pvfdev->devfn == vf_devfn)
+			vfs_found++;
+		vf_devfn += 2;
+		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+					device_id, pvfdev);
+	}
+
+	return vfs_found;
+}
+
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+			 const struct ixgbe_info *ii)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err = 0;
+	int num_vf_macvlans, i;
+	struct vf_macvlans *mv_list;
+	int pre_existing_vfs = 0;
+
+	pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+	if (!pre_existing_vfs && !adapter->num_vfs)
+		return;
+
+	/* If there are pre-existing VFs then we have to force
+	 * use of that many because they were not deleted the last
+	 * time someone removed the PF driver.  That would have
+	 * been because they were allocated to guest VMs and can't
+	 * be removed.  Go ahead and just re-enable the old amount.
+	 * If the user wants to change the number of VFs they can
+	 * use ethtool while making sure no VFs are allocated to
+	 * guest VMs... i.e. the right way.
+	 */
+	if (pre_existing_vfs) {
+		adapter->num_vfs = pre_existing_vfs;
+		dev_warn(&adapter->pdev->dev, "Virtual Functions already "
+			 "enabled for this device - Please reload all "
+			 "VF drivers to avoid spoofed packet errors\n");
+	} else {
+		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+	}
+	if (err) {
+		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+		goto err_novfs;
+	}
+	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+
+	num_vf_macvlans = hw->mac.num_rar_entries -
+	(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+
+	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
+					     sizeof(struct vf_macvlans),
+					     GFP_KERNEL);
+	if (mv_list) {
+		/* Initialize list of VF macvlans */
+		INIT_LIST_HEAD(&adapter->vf_mvs.l);
+		for (i = 0; i < num_vf_macvlans; i++) {
+			mv_list->vf = -1;
+			mv_list->free = true;
+			mv_list->rar_entry = hw->mac.num_rar_entries -
+				(i + adapter->num_vfs + 1);
+			list_add(&mv_list->l, &adapter->vf_mvs.l);
+			mv_list++;
+		}
+	}
+
+	/* If call to enable VFs succeeded then allocate memory
+	 * for per VF control structures.
+	 */
+	adapter->vfinfo =
+		kcalloc(adapter->num_vfs,
+			sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (adapter->vfinfo) {
+		/* Now that we're sure SR-IOV is enabled
+		 * and memory allocated set up the mailbox parameters
+		 */
+		ixgbe_init_mbx_params_pf(hw);
+		memcpy(&hw->mbx.ops, ii->mbx_ops,
+		       sizeof(hw->mbx.ops));
+
+		/* Disable RSC when in SR-IOV mode */
+		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+				     IXGBE_FLAG2_RSC_ENABLED);
+		return;
+	}
+
+	/* Oh oh */
+	e_err(probe, "Unable to allocate memory for VF Data Storage - "
+	      "SRIOV disabled\n");
+	pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+	adapter->num_vfs = 0;
+}
+#endif /* #ifdef CONFIG_PCI_IOV */
+
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gcr;
+	u32 gpie;
+	u32 vmdctl;
+	int i;
+
+#ifdef CONFIG_PCI_IOV
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+
+	/* turn off device IOV mode */
+	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/* set default pool back to 0 */
+	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+
+	/* Release reference to VF devices */
+	for (i = 0; i < adapter->num_vfs; i++) {
+		if (adapter->vfinfo[i].vfdev)
+			pci_dev_put(adapter->vfinfo[i].vfdev);
+	}
+	kfree(adapter->vfinfo);
+	kfree(adapter->mv_list);
+	adapter->vfinfo = NULL;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
 				   int entries, u16 *hash_list, u32 vf)
 {
@@ -273,11 +438,26 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 	return 0;
 }

+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
+{
+	int i;
+	for (i = 0; i < adapter->num_vfs; i++) {
+		if (adapter->vfinfo[i].vfdev->dev_flags &
+				PCI_DEV_FLAGS_ASSIGNED)
+			return true;
+	}
+	return false;
+}
+
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
 	unsigned char vf_mac_addr[6];
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	unsigned int vfn = (event_mask & 0x3f);
+	struct pci_dev *pvfdev;
+	unsigned int device_id;
+	u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
+				(pdev->devfn & 1);

 	bool enable = ((event_mask & 0x10000000U) != 0);

@@ -290,6 +470,31 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 		 * for it later.
 		 */
 		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82599EB:
+			device_id = IXGBE_DEV_ID_82599_VF;
+			break;
+		case ixgbe_mac_X540:
+			device_id = IXGBE_DEV_ID_X540_VF;
+			break;
+		default:
+			device_id = 0;
+			break;
+		}
+
+		pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+		while (pvfdev) {
+			if (pvfdev->devfn == thisvf_devfn)
+				break;
+			pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+						device_id, pvfdev);
+		}
+		if (pvfdev)
+			adapter->vfinfo[vfn].vfdev = pvfdev;
+		else
+			e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
+			      thisvf_devfn);
 	}
 	}

 	return 0;
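
Both ixgbe_find_enabled_vfs() and the lookup in ixgbe_vf_configuration() above lean on the same routing-ID arithmetic: VFs of this PF start at the PF's devfn plus 0x80 and sit two devfns apart, with the PF's function parity bit preserved. A standalone sketch of just that formula; only the expression itself comes from the diff, the harness is illustrative.

#include <stdio.h>

/* devfn of VF number vfn belonging to the PF at pf_devfn, as computed
 * by ixgbe_vf_configuration() in this merge */
static unsigned int vf_devfn(unsigned int pf_devfn, unsigned int vfn)
{
	return (pf_devfn + 0x80 + (vfn << 1)) | (pf_devfn & 1);
}

int main(void)
{
	unsigned int vfn;

	for (vfn = 0; vfn < 3; vfn++)
		printf("PF devfn 0x00 -> VF %u at devfn 0x%02x\n",
		       vfn, vf_devfn(0x00, vfn));
	/* 0x80, 0x82, 0x84: the +2 stride ixgbe_find_enabled_vfs() walks */
	return 0;
}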

+ 5 - 0
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h

@@ -41,6 +41,11 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+			const struct ixgbe_info *ii);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+

 #endif /* _IXGBE_SRIOV_H_ */


+ 10 - 0
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h

@@ -65,6 +65,10 @@
 #define IXGBE_DEV_ID_82599_LS            0x154F
 #define IXGBE_DEV_ID_X540T               0x1528

+/* VF Device IDs */
+#define IXGBE_DEV_ID_82599_VF           0x10ED
+#define IXGBE_DEV_ID_X540_VF            0x1515
+
 /* General Registers */
 #define IXGBE_CTRL      0x00000
 #define IXGBE_STATUS    0x00008
@@ -766,6 +770,7 @@
 #define IXGBE_GCR_CAP_VER2              0x00040000

 #define IXGBE_GCR_EXT_MSIX_EN           0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR     0x40000000
 #define IXGBE_GCR_EXT_VT_MODE_16        0x00000001
 #define IXGBE_GCR_EXT_VT_MODE_32        0x00000002
 #define IXGBE_GCR_EXT_VT_MODE_64        0x00000003
@@ -1749,6 +1754,10 @@ enum {
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */

+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1  0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0    0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK     0xC /* Mask for WoL capabilities */
+
 /* PCI Bus Info */
 #define IXGBE_PCI_DEVICE_STATUS   0xAA
 #define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
@@ -1818,6 +1827,7 @@ enum {
 #define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH     0x04000000  /* Rx Desc. write-back flushing */
 #define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
 #define IXGBE_RXDCTL_RLPML_EN   0x00008000
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */

+ 8 - 23
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c

@@ -93,34 +93,19 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
  **/
 static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
 {
-	ixgbe_link_speed link_speed;
 	s32 status;
 	u32 ctrl, i;
-	bool link_up = false;

 	/* Call adapter stop to disable tx/rx and clear interrupts */
-	hw->mac.ops.stop_adapter(hw);
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != 0)
+		goto reset_hw_out;
-	/*
-	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
-	 * access and verify no pending requests before reset
-	 */
-	ixgbe_disable_pcie_master(hw);
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);

 mac_reset_top:
-	/*
-	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
-	 * If link reset is used when link is up, it might reset the PHY when
-	 * mng is using it.  If link is down or the flag to force full link
-	 * reset is set, then perform link reset.
-	 */
-	ctrl = IXGBE_CTRL_LNK_RST;
-	if (!hw->force_full_reset) {
-		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-		if (link_up)
-			ctrl = IXGBE_CTRL_RST;
-	}
-
+	ctrl = IXGBE_CTRL_RST;
 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
 	IXGBE_WRITE_FLUSH(hw);
@@ -137,8 +122,7 @@ mac_reset_top:
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
-
-	msleep(50);
+	msleep(100);

 	/*
 	 * Double resets are required for recovery from certain error
@@ -180,6 +164,7 @@ mac_reset_top:
 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
 	                           &hw->mac.wwpn_prefix);

+reset_hw_out:
 	return status;
 }

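The X540 reset path loses its link-up special case: CTRL.RST is always requested, and recovery from a failed master disable now rests entirely on the double-reset flag set in ixgbe_disable_pcie_master(). A toy sketch of how such a reset-twice flag is consumed; the loop shape mirrors the driver's mac_reset_top label, but the code below is illustrative rather than the driver's.

#include <stdio.h>

#define FLAGS_DOUBLE_RESET_REQUIRED 0x1

static unsigned int mac_flags = FLAGS_DOUBLE_RESET_REQUIRED;

int main(void)
{
	int resets = 0;

mac_reset_top:
	resets++;			/* issue CTRL.RST, poll for completion */

	/*
	 * Consume the flag set by the failed master disable: the first
	 * reset blocks new requests, the second clears any side effects
	 * of stragglers that completed in between.
	 */
	if (mac_flags & FLAGS_DOUBLE_RESET_REQUIRED) {
		mac_flags &= ~FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	printf("%d reset(s) issued\n", resets);	/* 2 */
	return 0;
}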

+ 2 - 0
include/linux/pci.h

@@ -174,6 +174,8 @@ enum pci_dev_flags {
 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
 	/* Device configuration is irrevocably lost if disabled into D3 */
 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+	/* Provide indication device is assigned by a Virtual Machine Manager */
+	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
 };

 enum pci_irq_reroute_variant {
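
The new flag is a plain bit in pci_dev->dev_flags, so the whole handshake is set, clear and test: KVM sets it on assignment (virt/kvm/iommu.c below), clears it on deassignment and teardown, and the ixgbe PF driver tests it in ixgbe_check_vf_assignment() before dismantling SR-IOV. A toy sketch of the three operations against a stubbed pci_dev; everything here is illustrative, not kernel code.

#include <stdio.h>

#define PCI_DEV_FLAGS_ASSIGNED 4	/* value added by this merge */

struct pci_dev_stub {			/* stand-in for struct pci_dev */
	unsigned int dev_flags;
};

int main(void)
{
	struct pci_dev_stub vf = { 0 };

	vf.dev_flags |= PCI_DEV_FLAGS_ASSIGNED;	  /* kvm_assign_device() */
	printf("assigned? %d\n",
	       !!(vf.dev_flags & PCI_DEV_FLAGS_ASSIGNED)); /* PF driver test */
	vf.dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;  /* kvm_deassign_device() */
	printf("assigned? %d\n", !!(vf.dev_flags & PCI_DEV_FLAGS_ASSIGNED));
	return 0;
}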

+ 2 - 0
virt/kvm/assigned-dev.c

@@ -205,6 +205,8 @@ static void kvm_free_assigned_device(struct kvm *kvm,
 	else
 		pci_restore_state(assigned_dev->dev);

+	assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+
 	pci_release_regions(assigned_dev->dev);
 	pci_disable_device(assigned_dev->dev);
 	pci_dev_put(assigned_dev->dev);

+ 4 - 0
virt/kvm/iommu.c

@@ -187,6 +187,8 @@ int kvm_assign_device(struct kvm *kvm,
 			goto out_unmap;
 	}

+	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+
 	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
 	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
 		assigned_dev->host_segnr,
 		assigned_dev->host_segnr,
 		assigned_dev->host_busnr,
 		assigned_dev->host_busnr,
@@ -215,6 +217,8 @@ int kvm_deassign_device(struct kvm *kvm,

 	iommu_detach_device(domain, &pdev->dev);

+	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+
 	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
 	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
 		assigned_dev->host_segnr,
 		assigned_dev->host_segnr,
 		assigned_dev->host_busnr,
 		assigned_dev->host_busnr,