@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && !nr_frags && size == len
+				     && size > 8))
+				size -= 4;
+
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+			      struct ixgb_desc_ring *tx_ring, int size)
+{
+	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+	+ 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+		     DESC_NEEDED))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-		netif_stop_queue(netdev);
+	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 