@@ -47,7 +47,7 @@
 
 #include "e1000.h"
 
-#define DRV_VERSION "0.3.3.3-k6"
+#define DRV_VERSION "0.3.3.4-k2"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	spin_lock_init(&adapter->tx_queue_lock);
 
 	return 0;
 err:
@@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
 		goto clean_rx;
 
-	/*
-	 * e1000_clean is called per-cpu. This lock protects
-	 * tx_ring from being cleaned by multiple cpus
-	 * simultaneously. A failure obtaining the lock means
-	 * tx_ring is currently being cleaned anyway.
-	 */
-	if (spin_trylock(&adapter->tx_queue_lock)) {
-		tx_cleaned = e1000_clean_tx_irq(adapter);
-		spin_unlock(&adapter->tx_queue_lock);
-	}
+	tx_cleaned = e1000_clean_tx_irq(adapter);
 
 clean_rx:
 	adapter->clean_rx(adapter, &work_done, budget);
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	if (e1000_alloc_queues(adapter))
 		return -ENOMEM;
 
-	spin_lock_init(&adapter->tx_queue_lock);
-
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	e1000_irq_disable(adapter);
 
@@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb->len - skb->data_len;
-	unsigned long irq_flags;
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
 
-	if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
 	/*
 	 * need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time
 	 */
-	if (e1000_maybe_stop_tx(netdev, count + 2)) {
-		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
+	if (e1000_maybe_stop_tx(netdev, count + 2))
 		return NETDEV_TX_BUSY;
-	}
 
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	tso = e1000_tso(adapter, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (count < 0) {
 		/* handle pci_map_single() error in e1000_tx_map */
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 		return NETDEV_TX_OK;
 	}
 
@@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	/* Make sure there is space in the ring for the next send. */
 	e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
 
-	spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 	return NETDEV_TX_OK;
 }
 
@@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	/*
-	 * We should not be using LLTX anymore, but we are still Tx faster with
-	 * it.
-	 */
-	netdev->features |= NETIF_F_LLTX;
-
 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
 		adapter->flags |= FLAG_MNG_PT_ENABLED;
 