@@ -1,7 +1,7 @@
/*******************************************************************************


- Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
+ { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;

- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;

if(netif_running(adapter->netdev))
@@ -784,8 +785,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Hook up test interrupt handler just for this test */
if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
shared_int = FALSE;
- } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
- netdev->name, netdev)){
+ } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+ netdev->name, netdev)){
*data = 1;
return -1;
}
@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
* test failed.
*/
adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC,
- (~mask & 0x00007FFF));
- E1000_WRITE_REG(&adapter->hw, ICS,
- (~mask & 0x00007FFF));
+ E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
+ E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
msec_delay(10);

if(adapter->test_icr) {
@@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)

/* Setup Tx descriptor ring and Tx buffers */

- txdr->count = 80;
+ if(!txdr->count)
+ txdr->count = E1000_DEFAULT_TXD;

size = txdr->count * sizeof(struct e1000_buffer);
if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)

/* Setup Rx descriptor ring and Rx buffers */

- rxdr->count = 80;
+ if(!rxdr->count)
+ rxdr->count = E1000_DEFAULT_RXD;

size = rxdr->count * sizeof(struct e1000_buffer);
if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -1008,7 +1009,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;

- if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+ if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL))) {
ret_val = 6;
goto err_nomem;
@@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
- int i, ret_val;
+ int i, j, k, l, lc, good_cnt, ret_val=0;
+ unsigned long time;

E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

- for(i = 0; i < 64; i++) {
- e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
- pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
- txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
- }
- E1000_WRITE_REG(&adapter->hw, TDT, i);
-
- msec_delay(200);
-
- i = 0;
- do {
- pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
- rxdr->buffer_info[i].length,
- PCI_DMA_FROMDEVICE);
-
- ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
- 1024);
- i++;
- } while (ret_val != 0 && i < 64);
+ /* Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if(rxdr->count <= txdr->count)
+ lc = ((txdr->count / 64) * 2) + 1;
+ else
+ lc = ((rxdr->count / 64) * 2) + 1;
+
+ k = l = 0;
+ for(j = 0; j <= lc; j++) { /* loop count loop */
+ for(i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+ 1024);
+ pci_dma_sync_single_for_device(pdev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ PCI_DMA_TODEVICE);
+ if(unlikely(++k == txdr->count)) k = 0;
+ }
+ E1000_WRITE_REG(&adapter->hw, TDT, k);
+ msec_delay(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ pci_dma_sync_single_for_cpu(pdev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ PCI_DMA_FROMDEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rxdr->buffer_info[l].skb,
+ 1024);
+ if(!ret_val)
+ good_cnt++;
+ if(unlikely(++l == rxdr->count)) l = 0;
+ /* time + 20 msecs (200 msecs on 2.4) is more than
+ * enough time to complete the receives, if it's
+ * exceeded, break and error off
+ */
+ } while (good_cnt < 64 && jiffies < (time + 20));
+ if(good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if(jiffies >= (time + 2)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
return ret_val;
}
@@ -1354,13 +1386,12 @@ static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
*data = 0;
-
if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
int i = 0;
adapter->hw.serdes_link_down = TRUE;

- /* on some blade server designs link establishment */
- /* could take as long as 2-3 minutes. */
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes */
do {
e1000_check_for_link(&adapter->hw);
if (adapter->hw.serdes_link_down == FALSE)
@@ -1368,9 +1399,11 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
msec_delay(20);
} while (i++ < 3750);

- *data = 1;
+ *data = 1;
} else {
e1000_check_for_link(&adapter->hw);
+ if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
+ msec_delay(4000);

if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;