@@ -122,7 +122,7 @@ static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
@@ -138,7 +138,7 @@ static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
 static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -1436,7 +1436,7 @@ static void igb_configure(struct igb_adapter *adapter)
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
-		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
+		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
 }
 
@@ -1784,7 +1784,7 @@ static int igb_set_features(struct net_device *netdev, u32 features)
 static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
-	.ndo_start_xmit		= igb_xmit_frame_adv,
+	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
@@ -3955,8 +3955,8 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
-static inline int igb_tso_adv(struct igb_ring *tx_ring,
-			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static inline int igb_tso(struct igb_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
@@ -4035,8 +4035,8 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
-				   struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum(struct igb_ring *tx_ring,
+			       struct sk_buff *skb, u32 tx_flags)
 {
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
@@ -4120,8 +4120,8 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
-				 unsigned int first)
+static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
+			     unsigned int first)
 {
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
@@ -4196,9 +4196,9 @@ dma_error:
	return 0;
 }
 
-static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
-				    u32 tx_flags, int count, u32 paylen,
-				    u8 hdr_len)
+static inline void igb_tx_queue(struct igb_ring *tx_ring,
+				u32 tx_flags, int count, u32 paylen,
+				u8 hdr_len)
 {
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
@@ -4296,8 +4296,8 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-				    struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+				struct igb_ring *tx_ring)
 {
	int tso = 0, count;
	u32 tx_flags = 0;
@@ -4329,7 +4329,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 
	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
-		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
 
		if (tso < 0) {
			dev_kfree_skb_any(skb);
@@ -4339,7 +4339,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 
	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
+	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -4347,7 +4347,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
-	count = igb_tx_map_adv(tx_ring, skb, first);
+	count = igb_tx_map(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
@@ -4355,7 +4355,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
		return NETDEV_TX_OK;
	}
 
-	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+	igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 
	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
@@ -4363,8 +4363,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
	return NETDEV_TX_OK;
 }
 
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-				      struct net_device *netdev)
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+				  struct net_device *netdev)
 {
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
@@ -4387,7 +4387,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, tx_ring);
+	return igb_xmit_frame_ring(skb, tx_ring);
 }
 
 /**
@@ -5491,7 +5491,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
		clean_complete = !!igb_clean_tx_irq(q_vector);
 
	if (q_vector->rx_ring)
-		clean_complete &= igb_clean_rx_irq_adv(q_vector, budget);
+		clean_complete &= igb_clean_rx_irq(q_vector, budget);
 
	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
@@ -5670,8 +5670,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
	return count < tx_ring->count;
 }
 
-static inline void igb_rx_checksum_adv(struct igb_ring *ring,
-				       u32 status_err, struct sk_buff *skb)
+static inline void igb_rx_checksum(struct igb_ring *ring,
+				   u32 status_err, struct sk_buff *skb)
 {
	skb_checksum_none_assert(skb);
 
@@ -5750,7 +5750,7 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
	return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 {
	struct igb_ring *rx_ring = q_vector->rx_ring;
	union e1000_adv_rx_desc *rx_desc;
@@ -5836,7 +5836,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int budget)
		total_bytes += skb->len;
		total_packets++;
 
-		igb_rx_checksum_adv(rx_ring, staterr, skb);
+		igb_rx_checksum(rx_ring, staterr, skb);
 
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
@@ -5855,7 +5855,7 @@ next_desc:
		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
-			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}
 
@@ -5873,7 +5873,7 @@ next_desc:
	rx_ring->total_bytes += total_bytes;
 
	if (cleaned_count)
-		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
	return !!budget;
 }
@@ -5946,10 +5946,10 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 }
 
 /**
- * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, u16 cleaned_count)
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 {
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *bi;