@@ -1438,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct pci_dev *pdev = adapter->pdev;
 	u32 reg_ctl;
-	int i;
 
 	/* shut down the DMA engines now so they can be reinitialized later */
 
@@ -1448,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
 	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
 
 	/* now Tx */
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
 	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		reg_ctl &= ~IXGBE_DMATXCTL_TE;
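The two hunks above stop hard-coding ring register index 0 and instead use the reg_idx stored in the test rings. A minimal sketch of why that matters, assuming a multi-queue configuration in which software queue 0 maps to a nonzero hardware ring (the mapping itself is set up elsewhere in the driver):

	/* Queue 0 need not live at hardware ring 0 (e.g. with DCB on
	 * 82599), so the test must disable the ring it actually uses.
	 */
	u8 reg_idx = adapter->tx_ring[0]->reg_idx;	/* may be != 0 */
	u32 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));

	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
			txdctl & ~IXGBE_TXDCTL_ENABLE);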
@@ -1464,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 
 	ixgbe_reset(adapter);
 
-	if (tx_ring->desc && tx_ring->tx_buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct ixgbe_tx_buffer *buf =
-					&(tx_ring->tx_buffer_info[i]);
-			if (buf->dma)
-				dma_unmap_single(&pdev->dev, buf->dma,
-						 buf->length, DMA_TO_DEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->rx_buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct ixgbe_rx_buffer *buf =
-					&(rx_ring->rx_buffer_info[i]);
-			if (buf->dma)
-				dma_unmap_single(&pdev->dev, buf->dma,
-						 IXGBE_RXBUFFER_2048,
-						 DMA_FROM_DEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-				  tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-				  rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->tx_buffer_info);
-	tx_ring->tx_buffer_info = NULL;
-	kfree(rx_ring->rx_buffer_info);
-	rx_ring->rx_buffer_info = NULL;
+	ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
+	ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
 }
 
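With the open-coded teardown gone, ixgbe_free_desc_rings() defers to the driver's regular resource-freeing helpers. A hedged sketch of what those helpers are assumed to cover (the real ixgbe_free_tx_resources()/ixgbe_free_rx_resources() live in ixgbe_main.c), matching the steps the removed code performed by hand:

	/* Illustrative only: unmap and free each buffer, then release
	 * the buffer_info array and the descriptor DMA memory.
	 */
	static void test_ring_teardown(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring)
	{
		int i;

		for (i = 0; i < tx_ring->count; i++)
			ixgbe_unmap_and_free_tx_resource(adapter,
					&tx_ring->tx_buffer_info[i]);

		kfree(tx_ring->tx_buffer_info);
		tx_ring->tx_buffer_info = NULL;

		dma_free_coherent(&adapter->pdev->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}

One practical consequence: freeing no longer needs the stack-local pdev, which is why the pdev and loop-counter locals could be dropped from the function.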
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
 	u32 rctl, reg_data;
-	int i, ret_val;
+	int ret_val;
+	int err;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IXGBE_DEFAULT_TXD;
+	tx_ring->queue_index = 0;
+	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+	tx_ring->numa_node = adapter->node;
 
-	if (!tx_ring->count)
-		tx_ring->count = IXGBE_DEFAULT_TXD;
-
-	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
-					  sizeof(struct ixgbe_tx_buffer),
-					  GFP_KERNEL);
-	if (!(tx_ring->tx_buffer_info)) {
-		ret_val = 1;
-		goto err_nomem;
-	}
-
-	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
-					   &tx_ring->dma, GFP_KERNEL);
-	if (!(tx_ring->desc)) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
-			((u64) tx_ring->dma >> 32));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
-			tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
-	reg_data |= IXGBE_HLREG0_TXPADEN;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+	err = ixgbe_setup_tx_resources(adapter, tx_ring);
+	if (err)
+		return 1;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
 		reg_data |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
 	}
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
-	reg_data |= IXGBE_TXDCTL_ENABLE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		tx_ring->tx_buffer_info[i].skb = skb;
-		tx_ring->tx_buffer_info[i].length = skb->len;
-		tx_ring->tx_buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data, skb->len,
-				       DMA_TO_DEVICE);
-		desc->read.buffer_addr =
-			cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
-		desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
-						       IXGBE_TXD_CMD_IFCS |
-						       IXGBE_TXD_CMD_RS);
-		desc->read.olinfo_status = 0;
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			desc->read.olinfo_status |=
-				(skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-	}
+	ixgbe_configure_tx_ring(adapter, tx_ring);
 
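The Tx side of ixgbe_setup_desc_rings() is thus reduced to describing the ring and reusing the driver's shared paths. A sketch of the assumed contract (declarations only; the implementations live in ixgbe_main.c and are not part of this patch):

	/* setup: allocate tx_buffer_info[] and the descriptor DMA area
	 * for tx_ring->count entries; assumed to return 0 or a negative
	 * errno.
	 * configure: program TDBAL/TDBAH/TDLEN/TDH/TDT for
	 * tx_ring->reg_idx and set TXDCTL.ENABLE -- the register writes
	 * the removed code issued by hand against index 0.
	 */
	int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *tx_ring);
	void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring);

Note that test frames are no longer pre-built into every descriptor here; ixgbe_run_loopback_test() now constructs a single test skb and sends it through the normal transmit routine (see below).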
 	/* Setup Rx Descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IXGBE_DEFAULT_RXD;
-
-	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
-					  sizeof(struct ixgbe_rx_buffer),
-					  GFP_KERNEL);
-	if (!(rx_ring->rx_buffer_info)) {
+	rx_ring->count = IXGBE_DEFAULT_RXD;
+	rx_ring->queue_index = 0;
+	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+	rx_ring->numa_node = adapter->node;
+
+	err = ixgbe_setup_rx_resources(adapter, rx_ring);
+	if (err) {
 		ret_val = 4;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
-					   &rx_ring->dma, GFP_KERNEL);
-	if (!(rx_ring->desc)) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
-
 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
-			((u64)rx_ring->dma & 0xFFFFFFFF));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
-	reg_data &= ~IXGBE_HLREG0_LPBK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
-#define IXGBE_RDRXCTL_RDMTS_MASK	0x00000003 /* Receive Descriptor Minimum
-						      Threshold Size mask */
-	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
-#define IXGBE_MCSTCTRL_MO_MASK	0x00000003 /* Multicast Offset mask */
-	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
-	reg_data |= adapter->hw.mac.mc_filter_type;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
-	reg_data |= IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0]->reg_idx;
-		u32 k;
-		for (k = 0; k < 10; k++) {
-			if (IXGBE_READ_REG(&adapter->hw,
-					   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-				break;
-			else
-				msleep(1);
-		}
-	}
+	ixgbe_configure_rx_ring(adapter, rx_ring);
 
 	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
 
-	for (i = 0; i < rx_ring->count; i++) {
-		union ixgbe_adv_rx_desc *rx_desc =
-				IXGBE_RX_DESC_ADV(rx_ring, i);
-		struct sk_buff *skb;
-
-		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		rx_ring->rx_buffer_info[i].skb = skb;
-		rx_ring->rx_buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data,
-				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
-		rx_desc->read.pkt_addr =
-			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
-		memset(skb->data, 0x00, skb->len);
-	}
-
 	return 0;
 
 err_nomem:
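For context, the ethtool self-test is assumed to drive these routines in sequence; a usage sketch (ixgbe_loopback_test() and ixgbe_loopback_cleanup() are from the same file but are assumptions here, not part of the shown hunks):

	/* inside ixgbe_loopback_test(), roughly: */
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);
err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;

The nonzero return values (1 and 4 above, 11-13 below) land in the ethtool test-results array, so each failure mode stays distinguishable.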
@@ -1692,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
 	u32 reg_data;
 
 	/* right now we only support MAC loopback in the driver */
-
-	/* Setup MAC loopback */
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	/* Setup MAC loopback */
 	reg_data |= IXGBE_HLREG0_LPBK;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
 	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
 	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
 
 	/* Disable Atlas Tx lanes; re-enabled in reset path */
 	if (hw->mac.type == ixgbe_mac_82598EB) {
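Two behavioral additions ride along here: the FCTRL receive-filter setup moves out of ixgbe_setup_desc_rings() into the loopback-specific path where it logically belongs, and a settle delay follows the forced-link write. A minimal sketch of the flush-then-wait pattern, assuming 10 ms suffices for the MAC to reach forced 10G link-up:

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
	IXGBE_WRITE_FLUSH(&adapter->hw);	/* push the posted write out */
	msleep(10);				/* let forced link-up settle */

The three FCTRL bits (broadcast accept, store bad packets, multicast promiscuous) keep the looped-back test frames from being dropped by the receive filters before they reach the test ring.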
@@ -1759,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
 	return 13;
 }
 
+static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *rx_ring,
+				  struct ixgbe_ring *tx_ring,
+				  unsigned int size)
+{
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	const int bufsz = rx_ring->rx_buf_len;
+	u32 staterr;
+	u16 rx_ntc, tx_ntc, count = 0;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		/* check Rx buffer */
+		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+		dma_unmap_single(&adapter->pdev->dev,
+				 rx_buffer_info->dma,
+				 bufsz,
+				 DMA_FROM_DEVICE);
+		rx_buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on Tx side */
+		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+
+		/* increment Rx/Tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
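The new ixgbe_clean_test_rings() walks both rings in lockstep: each write-back descriptor with the DD (descriptor done) bit set yields one received buffer to verify and one transmit buffer to reclaim. The wraparound increment it uses, shown in isolation:

	/* Advance a ring index with wraparound; a compare-and-reset
	 * instead of a modulo keeps a divide out of the loop.
	 */
	static inline u16 next_ring_idx(u16 idx, u16 ring_count)
	{
		idx++;
		return (idx == ring_count) ? 0 : idx;
	}

Passing count to ixgbe_alloc_rx_buffers() then refills exactly as many Rx buffers as were consumed, so the ring stays full for the next iteration.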
 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt, ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+	/* place data into test skb */
+	ixgbe_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
 
 	/*
 	 * Calculate the loop count based on the largest descriptor ring
@@ -1780,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
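A worked example of the loop count, assuming the defaults of this era are 512-entry rings (IXGBE_DEFAULT_TXD/IXGBE_DEFAULT_RXD; an assumption, check ixgbe.h):

	u16 count = 512;			/* assumed ring size */
	int lc = ((count / 64) * 2) + 1;	/* = 17 iterations */
	/* 17 iterations x 64 frames = 1088 frames, enough to wrap a
	 * 512-entry ring twice and exercise every descriptor.
	 */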
-	k = l = 0;
 	for (j = 0; j <= lc; j++) {
-		for (i = 0; i < 64; i++) {
-			ixgbe_create_lbtest_frame(
-					tx_ring->tx_buffer_info[k].skb,
-					1024);
-			dma_sync_single_for_device(&pdev->dev,
-					tx_ring->tx_buffer_info[k].dma,
-					tx_ring->tx_buffer_info[k].length,
-					DMA_TO_DEVICE);
-			if (unlikely(++k == tx_ring->count))
-				k = 0;
-		}
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
-		msleep(200);
-		/* set the start time for the receive */
-		time = jiffies;
+		/* reset count of good packets */
 		good_cnt = 0;
-		do {
-			/* receive the sent packets */
-			dma_sync_single_for_cpu(&pdev->dev,
-					rx_ring->rx_buffer_info[l].dma,
-					IXGBE_RXBUFFER_2048,
-					DMA_FROM_DEVICE);
-			ret_val = ixgbe_check_lbtest_frame(
-					rx_ring->rx_buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue */
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = ixgbe_xmit_frame_ring(skb,
+							   adapter->netdev,
+							   adapter,
+							   tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			if (++l == rx_ring->count)
-				l = 0;
-			/*
-			 * time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			/* ret_val is the same as mis-compare */
-			ret_val = 13;
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			/* Error code for time out error */
-			ret_val = 14;
+
+		/* allow 200 milliseconds for packets to go from Tx to Rx */
+		msleep(200);
+
+		good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
+						  tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	}
 
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
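The rewritten test shares one skb across every transmit, which is what makes the final kfree_skb() correct. The reference flow, shown in isolation (the Tx cleanup is assumed to drop its reference via ixgbe_unmap_and_free_tx_resource()):

	skb = alloc_skb(size, GFP_KERNEL);	/* users = 1 */
	skb_get(skb);				/* users = 2; Tx path owns one */
	/* ixgbe_xmit_frame_ring() consumes that reference: after Tx
	 * completion, ixgbe_clean_test_rings() drops it, users = 1.
	 */
	kfree_skb(skb);				/* users = 0, skb freed */

Each of the 64 transmits per iteration takes its own reference, so the skb's payload is never freed while the hardware may still be reading it.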