@@ -5968,12 +5968,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
-			if (unlikely(skb->no_fcs))
-				first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
-			if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-				return;
-		}
+		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+			return;
 	} else {
 		u8 l4_hdr = 0;
 		switch (first->protocol) {
@@ -6031,30 +6028,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 			  type_tucmd, mss_l4len_idx);
 }
 
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+	((_flag <= _result) ? \
+	 ((u32)(_input & _flag) * (_result / _flag)) : \
+	 ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 {
 	/* set type for advanced descriptor with frame checksum insertion */
-	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-				      IXGBE_ADVTXD_DCMD_DEXT);
+	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+		       IXGBE_ADVTXD_DCMD_DEXT |
+		       IXGBE_ADVTXD_DCMD_IFCS;
 
 	/* set HW vlan bit if vlan is present */
-	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
-		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
-	if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
-		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+				   IXGBE_ADVTXD_DCMD_VLE);
 
 	/* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
-	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
-		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+				   IXGBE_ADVTXD_DCMD_TSE);
+
+	/* set timestamp bit if present */
+	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+				   IXGBE_ADVTXD_MAC_TSTAMP);
 
 	/* insert frame checksum */
-	if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
-		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
 
 	return cmd_type;
 }
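The IXGBE_SET_FLAG() macro above maps one single-bit flag onto another without a branch: _flag and _result are compile-time power-of-two constants, so the ratio between them is itself a power of two and the multiply or divide folds into a single shift. It also explains why ixgbe_tx_cmd_type() now takes the skb and starts with IXGBE_ADVTXD_DCMD_IFCS already set: the final XOR clears IFCS again whenever skb->no_fcs is set, which is what lets the old IXGBE_TX_FLAGS_NO_IFCS bookkeeping in ixgbe_tx_csum() go away. A minimal userspace sketch of the technique, with hypothetical DEMO_* values standing in for the real IXGBE_* constants:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical single-bit constants, for illustration only; the
	 * real IXGBE_ADVTXD_* values live in ixgbe_type.h. */
	#define DEMO_TX_FLAGS_TSO	0x00000002u	/* software tx_flags bit */
	#define DEMO_ADVTXD_DCMD_TSE	0x20000000u	/* descriptor command bit */
	#define DEMO_ADVTXD_DCMD_IFCS	0x02000000u	/* insert frame checksum */

	/* same shape as IXGBE_SET_FLAG: both arms of the ?: are constant
	 * expressions, so the power-of-two multiply/divide becomes a shift */
	#define DEMO_SET_FLAG(_input, _flag, _result) \
		((_flag <= _result) ? \
		 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
		 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

	int main(void)
	{
		uint32_t tx_flags = DEMO_TX_FLAGS_TSO;
		unsigned int no_fcs = 1;	/* stands in for skb->no_fcs */

		/* IFCS defaults to on, as in the new cmd_type initializer */
		uint32_t cmd_type = DEMO_ADVTXD_DCMD_IFCS;

		/* 0x00000002 set in tx_flags becomes 0x20000000 in cmd_type */
		cmd_type |= DEMO_SET_FLAG(tx_flags, DEMO_TX_FLAGS_TSO,
					  DEMO_ADVTXD_DCMD_TSE);

		/* XOR drops the default IFCS bit when no_fcs is set */
		cmd_type ^= DEMO_SET_FLAG(no_fcs, 1, DEMO_ADVTXD_DCMD_IFCS);

		printf("cmd_type = 0x%08x\n", cmd_type);	/* 0x20000000 */
		return 0;
	}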
@@ -6062,28 +6061,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 				   u32 tx_flags, unsigned int paylen)
 {
-	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
 
 	/* enable L4 checksum for TSO and TX checksum offload */
-	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+					IXGBE_TX_FLAGS_CSUM,
+					IXGBE_ADVTXD_POPTS_TXSM);
 
 	/* enable IPv4 checksum for TSO */
-	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+					IXGBE_TX_FLAGS_IPV4,
+					IXGBE_ADVTXD_POPTS_IXSM);
 
 	/*
 	 * Check Context must be set if Tx switch is enabled, which it
 	 * always is for the case where virtual functions are running
 	 */
-#ifdef IXGBE_FCOE
-	if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
-	if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
-		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+					IXGBE_TX_FLAGS_CC,
+					IXGBE_ADVTXD_CC);
 
-	tx_desc->read.olinfo_status = olinfo_status;
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
 #define IXGBE_TXD_CMD	(IXGBE_TXD_CMD_EOP | \
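A pattern shared by this and the previous hunk: descriptor words are now accumulated in host byte order and converted to little-endian exactly once, when the value is written into the descriptor, instead of wrapping every flag constant in its own cpu_to_le32(). A minimal sketch of that ordering, using glibc's htole32() as a userspace stand-in for cpu_to_le32() and hypothetical DEMO_* values:

	#include <endian.h>	/* htole32(); stand-in for cpu_to_le32() */
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical option bits and payload shift, for illustration. */
	#define DEMO_POPTS_TXSM		0x00000200u
	#define DEMO_POPTS_IXSM		0x00000100u
	#define DEMO_PAYLEN_SHIFT	14

	int main(void)
	{
		/* accumulate everything in host byte order ... */
		uint32_t olinfo_status = 1500u << DEMO_PAYLEN_SHIFT;

		olinfo_status |= DEMO_POPTS_TXSM;	/* L4 checksum */
		olinfo_status |= DEMO_POPTS_IXSM;	/* IPv4 checksum */

		/* ... and byte-swap once, at the descriptor write */
		uint32_t desc_word = htole32(olinfo_status);

		printf("host 0x%08x -> le32 0x%08x\n",
		       olinfo_status, desc_word);
		return 0;
	}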
@@ -6102,13 +6100,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	unsigned int size = skb_headlen(skb);
 	unsigned int paylen = skb->len - hdr_len;
 	u32 tx_flags = first->tx_flags;
-	__le32 cmd_type;
+	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
 
 	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
-	cmd_type = ixgbe_tx_cmd_type(tx_flags);
 
 #ifdef IXGBE_FCOE
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6134,7 +6131,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	for (;;) {
 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
-				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
 
 			i++;
 			tx_desc++;
@@ -6153,7 +6150,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 		if (likely(!data_len))
 			break;
 
-		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
 
 		i++;
 		tx_desc++;
@@ -6185,8 +6182,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	}
 
 	/* write last descriptor with RS and EOP bits */
-	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
-	tx_desc->read.cmd_type_len = cmd_type;
+	cmd_type |= size | IXGBE_TXD_CMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
 
 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
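Swapping "|" for "^" when folding the length into cmd_type in the two hunks above is safe because the operands never share bits: the length field occupies the low bits of cmd_type_len and those bits are still zero in cmd_type at that point, so XOR and OR produce identical results (the final descriptor write above still uses |=). A small sketch of the equivalence, with hypothetical bit values:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical command bits, all above bit 15, so they can
		 * never collide with a length that fits in the low 16 bits */
		uint32_t cmd_type = 0x20300000u;
		uint32_t size = 0x3fffu;

		/* with disjoint bit ranges, XOR and OR are interchangeable */
		assert((cmd_type ^ size) == (cmd_type | size));

		printf("cmd_type_len = 0x%08x\n", cmd_type ^ size);
		return 0;
	}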
@@ -6447,7 +6444,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	 * Tx switch had been disabled.
 	 */
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-		tx_flags |= IXGBE_TX_FLAGS_TXSW;
+		tx_flags |= IXGBE_TX_FLAGS_CC;
 
 #endif
 	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */