@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.20-k3-NAPI"
+#define DRV_VERSION "7.3.21-k2-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->features |= NETIF_F_LLTX;
-
 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_TSO6;
 	netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&adapter->tx_queue_lock);
-
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	e1000_irq_disable(adapter);
 
@@ -1624,7 +1620,6 @@ setup_tx_desc_die:
 
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
-	spin_lock_init(&txdr->tx_lock);
 
 	return 0;
 }
@@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb->len - skb->data_len;
-	unsigned long flags;
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	    (hw->mac_type == e1000_82573))
 		e1000_transfer_dhcp_info(adapter, skb);
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
-	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
 		return NETDEV_TX_BUSY;
-	}
 
 	if (unlikely(hw->mac_type == e1000_82547)) {
 		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
 			netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
-			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	tso = e1000_tso(adapter, tx_ring, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 
 	adapter = netdev_priv(poll_dev);
 
-	/* e1000_clean is called per-cpu. This lock protects
-	 * tx_ring[0] from being cleaned by multiple cpus
-	 * simultaneously. A failure obtaining the lock means
-	 * tx_ring[0] is currently being cleaned anyway. */
-	if (spin_trylock(&adapter->tx_queue_lock)) {
-		tx_cleaned = e1000_clean_tx_irq(adapter,
-		                                &adapter->tx_ring[0]);
-		spin_unlock(&adapter->tx_queue_lock);
-	}
+	tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
 
 	adapter->clean_rx(adapter, &adapter->rx_ring[0],
 	                  &work_done, budget);
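
Why the locks can go: clearing NETIF_F_LLTX means the core network stack serializes calls into the driver's transmit routine by taking the device TX queue lock itself, so e1000_xmit_frame no longer needs its private trylock, and the NETDEV_TX_LOCKED "collision" return path disappears with it. A minimal caller-side sketch, loosely based on the net core of this era (simplified illustration, not part of this patch):

	/* Simplified sketch: for a driver that does not advertise
	 * NETIF_F_LLTX, HARD_TX_LOCK() expands to __netif_tx_lock(),
	 * so the driver's xmit routine (here e1000_xmit_frame()) runs
	 * under the core-held queue lock and never sees a concurrent
	 * caller on the same queue. */
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq))
		rc = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

On the cleanup side, tx_queue_lock only guarded tx_ring[0] against being cleaned by two CPUs at once; NAPI already guarantees that a given napi_struct is polled by at most one CPU at a time, so e1000_clean can call e1000_clean_tx_irq without it.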