@@ -1227,6 +1227,92 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
 	temp = rd32(E1000_SCVPC);
 }
 
+/**
+ * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
+ * @hw: pointer to the HW structure
+ *
+ * After rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ **/
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+	int i, ms_wait;
+
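+	/* The workaround applies only to the 82575 and only when
+	 * manageability (MANC.RCV_TCO_EN) is enabled.
+	 */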
+	if (hw->mac.type != e1000_82575 ||
+	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+		return;
+
+	/* Disable all RX queues */
+	for (i = 0; i < 4; i++) {
+		rxdctl[i] = rd32(E1000_RXDCTL(i));
+		wr32(E1000_RXDCTL(i),
+		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+	}
+	/* Poll all queues to verify they have shut down */
+	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+		msleep(1);
+		rx_enabled = 0;
+		for (i = 0; i < 4; i++)
+			rx_enabled |= rd32(E1000_RXDCTL(i));
+		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+			break;
+	}
+
+	if (ms_wait == 10)
+		hw_dbg("Queue disable timed out after 10ms\n");
+
+	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+	 * incoming packets are rejected. Set enable and wait 2ms so that
+	 * any packets that arrived while RCTL.EN was being set are flushed
+	 */
+	rfctl = rd32(E1000_RFCTL);
+	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
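+	/* Zeroing RLPML while RCTL.LPE is set makes every incoming frame
+	 * look oversized, so the hardware rejects them all.
+	 */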
+	rlpml = rd32(E1000_RLPML);
+	wr32(E1000_RLPML, 0);
+
+	rctl = rd32(E1000_RCTL);
+	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+	temp_rctl |= E1000_RCTL_LPE;
+
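+	/* Program the reject-everything configuration with the receiver
+	 * disabled, then set EN again so the fifo drains through it.
+	 */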
+	wr32(E1000_RCTL, temp_rctl);
+	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
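+	/* flush the posted writes before starting the 2ms wait */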
+	wrfl();
+	msleep(2);
+
+	/* Enable RX queues that were previously enabled and restore our
+	 * previous state
+	 */
+	for (i = 0; i < 4; i++)
+		wr32(E1000_RXDCTL(i), rxdctl[i]);
+	wr32(E1000_RCTL, rctl);
+	wrfl();
+
+	wr32(E1000_RLPML, rlpml);
+	wr32(E1000_RFCTL, rfctl);
+
+	/* Flush receive errors generated by workaround */
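+	/* ROC, RNBC and MPC are clear-on-read statistics registers, so
+	 * reading them discards the counts accrued during the flush.
+	 */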
+	rd32(E1000_ROC);
+	rd32(E1000_RNBC);
+	rd32(E1000_MPC);
+}
+
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
 	.reset_hw = igb_reset_hw_82575,
 	.init_hw = igb_init_hw_82575,