@@ -772,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
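
As an illustrative aside, not part of the patch itself: the macros removed above (and presumably relocated to a shared header such as ixgbe.h, since the transmit path below still uses them) cap each Tx data descriptor at 1 << 14 = 16384 bytes, so TXD_USE_COUNT(S) is simply a ceiling division of S by 16 KiB written as a shift plus a remainder test. A minimal standalone sketch of that arithmetic, reusing the same definitions:

/* Illustrative only -- not part of the patch. Reproduces the worst-case
 * descriptor math of the macros above: a Tx data descriptor carries at
 * most 1 << 14 = 16384 bytes, so a buffer of S bytes needs
 * ceil(S / 16384) descriptors.
 */
#include <stdio.h>

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
#define TXD_USE_COUNT(S)	(((S) >> IXGBE_MAX_TXD_PWR) + \
				 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	/* a 4 KiB page fits in a single descriptor */
	printf("4096  -> %u\n", TXD_USE_COUNT(4096u));	/* 1 */
	/* 20000 bytes: one full 16 KiB chunk plus a remainder */
	printf("20000 -> %u\n", TXD_USE_COUNT(20000u));	/* 2 */
	/* exactly 32 KiB: two full chunks, no remainder */
	printf("32768 -> %u\n", TXD_USE_COUNT(32768u));	/* 2 */
	return 0;
}

This is also why the new counting logic in the hunk below only runs the per-fragment TXD_USE_COUNT() loop when PAGE_SIZE exceeds IXGBE_MAX_DATA_PER_TXD: the #else branch assumes each fragment spans at most one page and therefore at most one descriptor, so the division can be skipped.
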
@@ -6832,14 +6823,34 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int tx_flags = 0;
 	int tso;
-	u16 count = 0;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
 	u16 first;
-	unsigned int f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
 	u8 hdr_len = 0;
 
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
 	protocol = vlan_get_protocol(skb);
 
 	if (vlan_tx_tag_present(skb)) {
@@ -6863,25 +6874,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
 
+#endif
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
+
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */