|
@@ -70,6 +70,14 @@ struct netfront_cb {
|
|
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
|
|
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
|
|
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
|
|
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
|
|
|
|
|
|
|
|
/*
 * Per-CPU RX/TX counters for the netfront device.  One instance is
 * allocated per possible CPU (alloc_percpu) and updated locklessly on
 * the hot paths; readers take a consistent snapshot via @syncp.
 */
struct netfront_stats {
	u64			rx_packets;	/* packets delivered up the stack */
	u64			tx_packets;	/* packets queued to the backend */
	u64			rx_bytes;	/* bytes delivered up the stack */
	u64			tx_bytes;	/* bytes queued to the backend */
	/*
	 * Seqcount-style guard so 32-bit hosts can read the u64 counters
	 * without tearing; a no-op on 64-bit builds.
	 */
	struct u64_stats_sync	syncp;
};
|
|
|
|
+
|
|
struct netfront_info {
|
|
struct netfront_info {
|
|
struct list_head list;
|
|
struct list_head list;
|
|
struct net_device *netdev;
|
|
struct net_device *netdev;
|
|
@@ -122,6 +130,8 @@ struct netfront_info {
|
|
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
|
|
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
|
|
|
|
|
|
/* Statistics */
|
|
/* Statistics */
|
|
|
|
+ struct netfront_stats __percpu *stats;
|
|
|
|
+
|
|
unsigned long rx_gso_checksum_fixup;
|
|
unsigned long rx_gso_checksum_fixup;
|
|
};
|
|
};
|
|
|
|
|
|
@@ -468,6 +478,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
{
|
|
unsigned short id;
|
|
unsigned short id;
|
|
struct netfront_info *np = netdev_priv(dev);
|
|
struct netfront_info *np = netdev_priv(dev);
|
|
|
|
+ struct netfront_stats *stats = this_cpu_ptr(np->stats);
|
|
struct xen_netif_tx_request *tx;
|
|
struct xen_netif_tx_request *tx;
|
|
struct xen_netif_extra_info *extra;
|
|
struct xen_netif_extra_info *extra;
|
|
char *data = skb->data;
|
|
char *data = skb->data;
|
|
@@ -552,8 +563,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
if (notify)
|
|
if (notify)
|
|
notify_remote_via_irq(np->netdev->irq);
|
|
notify_remote_via_irq(np->netdev->irq);
|
|
|
|
|
|
- dev->stats.tx_bytes += skb->len;
|
|
|
|
- dev->stats.tx_packets++;
|
|
|
|
|
|
+ u64_stats_update_begin(&stats->syncp);
|
|
|
|
+ stats->tx_bytes += skb->len;
|
|
|
|
+ stats->tx_packets++;
|
|
|
|
+ u64_stats_update_end(&stats->syncp);
|
|
|
|
|
|
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
|
|
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
|
|
xennet_tx_buf_gc(dev);
|
|
xennet_tx_buf_gc(dev);
|
|
@@ -847,6 +860,8 @@ out:
|
|
static int handle_incoming_queue(struct net_device *dev,
|
|
static int handle_incoming_queue(struct net_device *dev,
|
|
struct sk_buff_head *rxq)
|
|
struct sk_buff_head *rxq)
|
|
{
|
|
{
|
|
|
|
+ struct netfront_info *np = netdev_priv(dev);
|
|
|
|
+ struct netfront_stats *stats = this_cpu_ptr(np->stats);
|
|
int packets_dropped = 0;
|
|
int packets_dropped = 0;
|
|
struct sk_buff *skb;
|
|
struct sk_buff *skb;
|
|
|
|
|
|
@@ -871,8 +886,10 @@ static int handle_incoming_queue(struct net_device *dev,
|
|
continue;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
|
|
- dev->stats.rx_packets++;
|
|
|
|
- dev->stats.rx_bytes += skb->len;
|
|
|
|
|
|
+ u64_stats_update_begin(&stats->syncp);
|
|
|
|
+ stats->rx_packets++;
|
|
|
|
+ stats->rx_bytes += skb->len;
|
|
|
|
+ u64_stats_update_end(&stats->syncp);
|
|
|
|
|
|
/* Pass it up. */
|
|
/* Pass it up. */
|
|
netif_receive_skb(skb);
|
|
netif_receive_skb(skb);
|
|
@@ -1034,6 +1051,38 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
|
|
|
|
+ struct rtnl_link_stats64 *tot)
|
|
|
|
+{
|
|
|
|
+ struct netfront_info *np = netdev_priv(dev);
|
|
|
|
+ int cpu;
|
|
|
|
+
|
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
|
+ struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
|
|
|
|
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
|
|
|
|
+ unsigned int start;
|
|
|
|
+
|
|
|
|
+ do {
|
|
|
|
+ start = u64_stats_fetch_begin_bh(&stats->syncp);
|
|
|
|
+
|
|
|
|
+ rx_packets = stats->rx_packets;
|
|
|
|
+ tx_packets = stats->tx_packets;
|
|
|
|
+ rx_bytes = stats->rx_bytes;
|
|
|
|
+ tx_bytes = stats->tx_bytes;
|
|
|
|
+ } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
|
|
|
|
+
|
|
|
|
+ tot->rx_packets += rx_packets;
|
|
|
|
+ tot->tx_packets += tx_packets;
|
|
|
|
+ tot->rx_bytes += rx_bytes;
|
|
|
|
+ tot->tx_bytes += tx_bytes;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ tot->rx_errors = dev->stats.rx_errors;
|
|
|
|
+ tot->tx_dropped = dev->stats.tx_dropped;
|
|
|
|
+
|
|
|
|
+ return tot;
|
|
|
|
+}
|
|
|
|
+
|
|
static void xennet_release_tx_bufs(struct netfront_info *np)
|
|
static void xennet_release_tx_bufs(struct netfront_info *np)
|
|
{
|
|
{
|
|
struct sk_buff *skb;
|
|
struct sk_buff *skb;
|
|
@@ -1182,6 +1231,7 @@ static const struct net_device_ops xennet_netdev_ops = {
|
|
.ndo_stop = xennet_close,
|
|
.ndo_stop = xennet_close,
|
|
.ndo_start_xmit = xennet_start_xmit,
|
|
.ndo_start_xmit = xennet_start_xmit,
|
|
.ndo_change_mtu = xennet_change_mtu,
|
|
.ndo_change_mtu = xennet_change_mtu,
|
|
|
|
+ .ndo_get_stats64 = xennet_get_stats64,
|
|
.ndo_set_mac_address = eth_mac_addr,
|
|
.ndo_set_mac_address = eth_mac_addr,
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
.ndo_fix_features = xennet_fix_features,
|
|
.ndo_fix_features = xennet_fix_features,
|
|
@@ -1216,6 +1266,11 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
|
|
np->rx_refill_timer.data = (unsigned long)netdev;
|
|
np->rx_refill_timer.data = (unsigned long)netdev;
|
|
np->rx_refill_timer.function = rx_refill_timeout;
|
|
np->rx_refill_timer.function = rx_refill_timeout;
|
|
|
|
|
|
|
|
+ err = -ENOMEM;
|
|
|
|
+ np->stats = alloc_percpu(struct netfront_stats);
|
|
|
|
+ if (np->stats == NULL)
|
|
|
|
+ goto exit;
|
|
|
|
+
|
|
/* Initialise tx_skbs as a free chain containing every entry. */
|
|
/* Initialise tx_skbs as a free chain containing every entry. */
|
|
np->tx_skb_freelist = 0;
|
|
np->tx_skb_freelist = 0;
|
|
for (i = 0; i < NET_TX_RING_SIZE; i++) {
|
|
for (i = 0; i < NET_TX_RING_SIZE; i++) {
|
|
@@ -1234,7 +1289,7 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
|
|
&np->gref_tx_head) < 0) {
|
|
&np->gref_tx_head) < 0) {
|
|
printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
|
|
printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
|
|
err = -ENOMEM;
|
|
err = -ENOMEM;
|
|
- goto exit;
|
|
|
|
|
|
+ goto exit_free_stats;
|
|
}
|
|
}
|
|
/* A grant for every rx ring slot */
|
|
/* A grant for every rx ring slot */
|
|
if (gnttab_alloc_grant_references(RX_MAX_TARGET,
|
|
if (gnttab_alloc_grant_references(RX_MAX_TARGET,
|
|
@@ -1270,6 +1325,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
|
|
|
|
|
|
exit_free_tx:
|
|
exit_free_tx:
|
|
gnttab_free_grant_references(np->gref_tx_head);
|
|
gnttab_free_grant_references(np->gref_tx_head);
|
|
|
|
+ exit_free_stats:
|
|
|
|
+ free_percpu(np->stats);
|
|
exit:
|
|
exit:
|
|
free_netdev(netdev);
|
|
free_netdev(netdev);
|
|
return ERR_PTR(err);
|
|
return ERR_PTR(err);
|
|
@@ -1869,6 +1926,8 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
|
|
|
|
|
|
xennet_sysfs_delif(info->netdev);
|
|
xennet_sysfs_delif(info->netdev);
|
|
|
|
|
|
|
|
+ free_percpu(info->stats);
|
|
|
|
+
|
|
free_netdev(info->netdev);
|
|
free_netdev(info->netdev);
|
|
|
|
|
|
return 0;
|
|
return 0;
|