@@ -96,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
 static int igb_sw_init(struct igb_adapter *);
@@ -113,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -1536,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
 	netif_carrier_off(netdev);

 	/* record the stats before reset*/
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);

 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
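Every process-context caller that refreshes the software counters — igb_down() here, the watchdog task below, and the new igb_get_stats64() handler — takes adapter->stats64_lock around igb_update_stats(), so only one of them walks the hardware statistics registers at a time and a concurrent reader can never copy out a half-updated adapter->stats64.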
@@ -1689,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
-	.ndo_get_stats		= igb_get_stats,
+	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_multicast_list	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -2276,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	if (hw->mac.type == e1000_82576)
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
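This spin_lock_init() is the only igb_main.c reference to the new adapter fields; the declarations themselves live in igb.h, which is not part of this excerpt. Inferred from the call sites, the header side presumably adds something along these lines (a sketch, not the literal patch):

#include <linux/u64_stats_sync.h>

struct igb_ring {
	/* ... existing members ... */
	struct u64_stats_sync tx_syncp;		/* writer: igb_clean_tx_irq() */
	struct u64_stats_sync tx_syncp2;	/* writer: __igb_maybe_stop_tx() */
	struct u64_stats_sync rx_syncp;		/* writer: rx clean/alloc paths */
};

struct igb_adapter {
	/* ... existing members ... */
	spinlock_t stats64_lock;		/* serializes igb_update_stats() callers */
	struct rtnl_link_stats64 stats64;	/* latest 64 bit snapshot */
};

The per-ring tx statistics structure would likewise gain the restart_queue2 counter that __igb_maybe_stop_tx() increments below.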
@@ -3483,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}

-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);

 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3550,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *ring;
+	unsigned int packets;

 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
@@ -3559,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 		goto set_itr_val;
 	}

-	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->rx_ring;
-		avg_wire_size = ring->total_bytes / ring->total_packets;
+	ring = q_vector->rx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = ring->total_bytes / packets;
 	}

-	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->tx_ring;
-		avg_wire_size = max_t(u32, avg_wire_size,
-				      (ring->total_bytes /
-				       ring->total_packets));
+	ring = q_vector->tx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = max_t(u32, avg_wire_size,
+					      ring->total_bytes / packets);
 	}

 	/* if avg_wire_size isn't set no work was done */
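Both branches now snapshot total_packets into a local before testing and dividing. ACCESS_ONCE(), defined in <linux/compiler.h> of this vintage, forces a single volatile load:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

Without it, the compiler may read ring->total_packets once for the zero test and again for the division; if another context resets the counter in between, the second read can be zero and the divide faults. Reading once into a local closes that window.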
@@ -4077,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)

 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	tx_ring->tx_stats.restart_queue++;
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
 	return 0;
 }

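Note that this counter becomes restart_queue2 under its own u64_stats_sync (tx_syncp2), while the increment in igb_clean_tx_irq() further down keeps restart_queue under tx_syncp. The likely reason: a u64_stats_sync seqcount assumes a single serialized writer, and the transmit path here is not serialized against the cleanup path, so each writer gets its own counter and its own sync structure.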
@@ -4214,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
 }

 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+						 struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	memcpy(stats, &adapter->stats64, sizeof(*stats));
+	spin_unlock(&adapter->stats64_lock);
+
+	return stats;
 }

 /**
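With ndo_get_stats64 the core supplies caller-owned storage instead of having the driver return a pointer to a net_device_stats embedded in the netdev, which is what lets full 64 bit counters survive on 32 bit hosts. For reference, this is approximately how net/core/dev.c consumed the hook in this era (a paraphrase, not the verbatim source):

const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					      struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		/* zero the caller's buffer, then let the driver fill it */
		memset(storage, 0, sizeof(*storage));
		return ops->ndo_get_stats64(dev, storage);
	}
	if (ops->ndo_get_stats) {
		/* legacy 32/unsigned-long path, widened for the caller */
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
		return storage;
	}
	netdev_stats_to_stats64(storage, &dev->stats);
	return storage;
}

Plain spin_lock() is sufficient for stats64_lock because every taker in this patch runs in process context.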
@@ -4305,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
  **/

-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+		      struct rtnl_link_stats64 *net_stats)
 {
-	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
+	unsigned int start;
+	u64 _bytes, _packets;

 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

@@ -4331,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		struct igb_ring *ring = adapter->rx_ring[i];
+
 		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += ring->rx_stats.bytes;
-		packets += ring->rx_stats.packets;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}

 	net_stats->rx_bytes = bytes;
@@ -4344,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
-		bytes += ring->tx_stats.bytes;
-		packets += ring->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
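These fetch/retry loops are the reader half of u64_stats_sync: on 64 bit kernels the structure is empty and the loop body runs once; on 32 bit SMP it wraps a seqcount, so a reader that overlaps a writer retries instead of observing a torn 64 bit counter. The _bh flavors also disable bottom halves across the read, since the writers run in NAPI (softirq) context. A minimal self-contained illustration of the pairing, with a hypothetical demo_stats type standing in for the igb rings:

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: a single serialized writer (e.g. one NAPI poller). */
static void demo_account(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent <packets, bytes> pair is seen. */
static void demo_fetch(struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}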
@@ -5397,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
 			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
 		}
 	}

@@ -5437,8 +5476,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
+	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->tx_syncp);
 	return count < tx_ring->count;
 }

@@ -5480,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 		 * packets, (aka let the stack check the crc32c)
 		 */
 		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
-
+			u64_stats_update_end(&ring->rx_syncp);
+		}
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -5669,8 +5712,10 @@ next_desc:

 	rx_ring->total_packets += total_packets;
 	rx_ring->total_bytes += total_bytes;
+	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
+	u64_stats_update_end(&rx_ring->rx_syncp);
 	return cleaned;
 }

@@ -5698,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = netdev_alloc_page(netdev);
-				if (!buffer_info->page) {
+				if (unlikely(!buffer_info->page)) {
+					u64_stats_update_begin(&rx_ring->rx_syncp);
 					rx_ring->rx_stats.alloc_failed++;
+					u64_stats_update_end(&rx_ring->rx_syncp);
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5714,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->page_dma)) {
 				buffer_info->page_dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -5722,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		skb = buffer_info->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			if (!skb) {
+			if (unlikely(!skb)) {
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}

@@ -5737,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->dma)) {
 				buffer_info->dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}