@@ -245,12 +245,6 @@ static int ether1394_stop(struct net_device *dev)
 	return 0;
 }
 
-/* Return statistics to the caller */
-static struct net_device_stats *ether1394_stats(struct net_device *dev)
-{
-	return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
-}
-
 /* FIXME: What to do if we timeout? I think a host reset is probably in order,
  * so that's what we do. Should we increment the stat counters too? */
 static void ether1394_tx_timeout(struct net_device *dev)
@@ -520,7 +514,6 @@ static const struct net_device_ops ether1394_netdev_ops = {
 	.ndo_open	= ether1394_open,
 	.ndo_stop	= ether1394_stop,
 	.ndo_start_xmit	= ether1394_tx,
-	.ndo_get_stats	= ether1394_stats,
 	.ndo_tx_timeout	= ether1394_tx_timeout,
 	.ndo_change_mtu	= ether1394_change_mtu,
 };
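Why the getter and the .ndo_get_stats hook can simply be dropped: struct net_device already embeds a struct net_device_stats member named stats, and when a driver leaves .ndo_get_stats unset the networking core (dev_get_stats() in net/core/dev.c) falls back to those embedded counters. A paraphrased sketch of that fallback, not verbatim core source for any particular release:

	/* Roughly what the core does when asked for a device's stats:
	 * use the driver's getter if one is registered, otherwise read
	 * the counters embedded in struct net_device directly. */
	static const struct net_device_stats *get_stats_sketch(struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_get_stats)
			return ops->ndo_get_stats(dev);
		return &dev->stats;
	}

So readers such as /proc/net/dev end up seeing exactly the counters the driver bumps in the hunks below, without going through a per-driver getter.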
@@ -1079,7 +1072,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
 		HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
 			   "lookup failure: " NODE_BUS_FMT,
 			   NODE_BUS_ARGS(priv->host, srcid));
-		priv->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return -1;
 	}
 	ud = node->ud;
@@ -1102,7 +1095,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
 		skb = dev_alloc_skb(len + dev->hard_header_len + 15);
 		if (unlikely(!skb)) {
 			ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
-			priv->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return -1;
 		}
 		skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
@@ -1221,15 +1214,15 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
 	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!skb->protocol) {
-		priv->stats.rx_errors++;
-		priv->stats.rx_dropped++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
 	} else if (netif_rx(skb) == NET_RX_DROP) {
-		priv->stats.rx_errors++;
-		priv->stats.rx_dropped++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
 	} else {
-		priv->stats.rx_packets++;
-		priv->stats.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -1511,17 +1504,18 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
 static void ether1394_dg_complete(struct packet_task *ptask, int fail)
 {
 	struct sk_buff *skb = ptask->skb;
-	struct eth1394_priv *priv = netdev_priv(skb->dev);
+	struct net_device *dev = skb->dev;
+	struct eth1394_priv *priv = netdev_priv(dev);
 	unsigned long flags;
 
 	/* Statistics */
 	spin_lock_irqsave(&priv->lock, flags);
 	if (fail) {
-		priv->stats.tx_dropped++;
-		priv->stats.tx_errors++;
+		dev->stats.tx_dropped++;
+		dev->stats.tx_errors++;
 	} else {
-		priv->stats.tx_bytes += skb->len;
-		priv->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+		dev->stats.tx_packets++;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1698,8 +1692,8 @@ fail:
 	dev_kfree_skb(skb);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	priv->stats.tx_dropped++;
-	priv->stats.tx_errors++;
+	dev->stats.tx_dropped++;
+	dev->stats.tx_errors++;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/*
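For reference, the resulting pattern in its most reduced form; the names (example_xmit, example_netdev_ops) are illustrative only and not part of eth1394 or this patch, and the prototypes mirror the ones used by this driver's era. Counters are bumped directly on the embedded dev->stats, and the ops table simply omits a stats getter:

	/* Minimal illustrative fragment, assuming nothing beyond
	 * <linux/netdevice.h>: account the packet on dev->stats and let
	 * the core's default stats path report it. */
	#include <linux/netdevice.h>

	static int example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);	/* pretend the hardware accepted it */
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_start_xmit	= example_xmit,
		/* no .ndo_get_stats: the core reads dev->stats itself */
	};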