@@ -475,7 +475,10 @@
 *	  Solution is to move call to dev_remove_pack outside of the
 *	  spinlock.
 *	  Set version to 2.6.1.
- *
+ * 2005/06/05 - Jay Vosburgh <fubar@us.ibm.com>
+ *	- Support for generating gratuitous ARPs in active-backup mode.
+ *	  Includes support for VLAN tagging all bonding-generated ARPs
+ *	  as needed.  Set version to 2.6.2.
 */
 
 //#define BONDING_DEBUG 1
@@ -519,6 +522,7 @@
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/if_bonding.h>
+#include <net/route.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -574,7 +578,6 @@ static struct proc_dir_entry *bond_proc_dir = NULL;
 
 static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
 static int arp_ip_count = 0;
-static u32 my_ip = 0;
 static int bond_mode = BOND_MODE_ROUNDROBIN;
 static int lacp_fast = 0;
 static int app_abi_ver = 0;
@@ -611,6 +614,7 @@ static struct bond_parm_tbl bond_mode_tbl[] = {
 /*-------------------------- Forward declarations ---------------------------*/
 
 static inline void bond_set_mode_ops(struct net_device *bond_dev, int mode);
+static void bond_send_gratuitous_arp(struct bonding *bond);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -659,6 +663,7 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
 
 	INIT_LIST_HEAD(&vlan->vlan_list);
 	vlan->vlan_id = vlan_id;
+	vlan->vlan_ip = 0;
 
 	write_lock_bh(&bond->lock);
 
@@ -1468,16 +1473,6 @@ static void bond_change_active_slave(struct bonding *bond, struct slave *new_act
 		}
 	}
 
-	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
-		if (old_active) {
-			bond_set_slave_inactive_flags(old_active);
-		}
-
-		if (new_active) {
-			bond_set_slave_active_flags(new_active);
-		}
-	}
-
 	if (USES_PRIMARY(bond->params.mode)) {
 		bond_mc_swap(bond, new_active, old_active);
 	}
@@ -1488,6 +1483,17 @@ static void bond_change_active_slave(struct bonding *bond, struct slave *new_act
 	} else {
 		bond->curr_active_slave = new_active;
 	}
+
+	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+		if (old_active) {
+			bond_set_slave_inactive_flags(old_active);
+		}
+
+		if (new_active) {
+			bond_set_slave_active_flags(new_active);
+		}
+		bond_send_gratuitous_arp(bond);
+	}
 }
 
 /**
@@ -2694,15 +2700,180 @@ out:
 	read_unlock(&bond->lock);
 }
 
+
+static u32 bond_glean_dev_ip(struct net_device *dev)
+{
+	struct in_device *idev;
+	struct in_ifaddr *ifa;
+	u32 addr = 0;
+
+	if (!dev)
+		return 0;
+
+	rcu_read_lock();
+	idev = __in_dev_get(dev);
+	if (!idev)
+		goto out;
+
+	ifa = idev->ifa_list;
+	if (!ifa)
+		goto out;
+
+	addr = ifa->ifa_local;
+out:
+	rcu_read_unlock();
+	return addr;
+}
+
+static int bond_has_ip(struct bonding *bond)
+{
+	struct vlan_entry *vlan, *vlan_next;
+
+	if (bond->master_ip)
+		return 1;
+
+	if (list_empty(&bond->vlan_list))
+		return 0;
+
+	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
+				 vlan_list) {
+		if (vlan->vlan_ip)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct net_device *slave_dev, int arp_op, u32 dest_ip, u32 src_ip, unsigned short vlan_id)
+{
+	struct sk_buff *skb;
+
+	dprintk("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
+		slave_dev->name, dest_ip, src_ip, vlan_id);
+
+	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+			 NULL, slave_dev->dev_addr, NULL);
+
+	if (!skb) {
+		printk(KERN_ERR DRV_NAME ": ARP packet allocation failed\n");
+		return;
+	}
+	if (vlan_id) {
+		skb = vlan_put_tag(skb, vlan_id);
+		if (!skb) {
+			printk(KERN_ERR DRV_NAME ": failed to insert VLAN tag\n");
+			return;
+		}
+	}
+	arp_xmit(skb);
+}
+
+
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-	int i;
+	int i, vlan_id, rv;
 	u32 *targets = bond->params.arp_targets;
+	struct vlan_entry *vlan, *vlan_next;
+	struct net_device *vlan_dev;
+	struct flowi fl;
+	struct rtable *rt;
 
 	for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
-		arp_send(ARPOP_REQUEST, ETH_P_ARP, targets[i], slave->dev,
-			 my_ip, NULL, slave->dev->dev_addr,
-			 NULL);
+		dprintk("basa: target %x\n", targets[i]);
+		if (list_empty(&bond->vlan_list)) {
+			dprintk("basa: empty vlan: arp_send\n");
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      bond->master_ip, 0);
+			continue;
+		}
+
+		/*
+		 * If VLANs are configured, we do a route lookup to
+		 * determine which VLAN interface would be used, so we
+		 * can tag the ARP with the proper VLAN tag.
+		 */
+		memset(&fl, 0, sizeof(fl));
+		fl.fl4_dst = targets[i];
+		fl.fl4_tos = RTO_ONLINK;
+
+		rv = ip_route_output_key(&rt, &fl);
+		if (rv) {
+			if (net_ratelimit()) {
+				printk(KERN_WARNING DRV_NAME
+				       ": %s: no route to arp_ip_target %u.%u.%u.%u\n",
+				       bond->dev->name, NIPQUAD(fl.fl4_dst));
+			}
+			continue;
+		}
+
+		/*
+		 * This target is not on a VLAN
+		 */
+		if (rt->u.dst.dev == bond->dev) {
+			dprintk("basa: rtdev == bond->dev: arp_send\n");
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      bond->master_ip, 0);
+			continue;
+		}
+
+		vlan_id = 0;
+		list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
+					 vlan_list) {
+			vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+			if (vlan_dev == rt->u.dst.dev) {
+				vlan_id = vlan->vlan_id;
+				dprintk("basa: vlan match on %s %d\n",
+					vlan_dev->name, vlan_id);
+				break;
+			}
+		}
+
+		if (vlan_id) {
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      vlan->vlan_ip, vlan_id);
+			continue;
+		}
+
+		if (net_ratelimit()) {
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: no path to arp_ip_target %u.%u.%u.%u via rt.dev %s\n",
+			       bond->dev->name, NIPQUAD(fl.fl4_dst),
+			       rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+		}
+	}
+}
+
+/*
+ * Kick out a gratuitous ARP for an IP on the bonding master plus one
+ * for each VLAN above us.
+ */
+static void bond_send_gratuitous_arp(struct bonding *bond)
+{
+	struct slave *slave = bond->curr_active_slave;
+	struct vlan_entry *vlan;
+	struct net_device *vlan_dev;
+
+	dprintk("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name,
+		slave ? slave->dev->name : "NULL");
+	if (!slave)
+		return;
+
+	if (bond->master_ip) {
+		bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip,
+			      bond->master_ip, 0);
+	}
+
+	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+		vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+		if (vlan->vlan_ip) {
+			bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
+				      vlan->vlan_ip, vlan->vlan_id);
+		}
 	}
 }
 
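For reference, a VLAN-tagged gratuitous ARP produced by bond_send_gratuitous_arp() above lands on the wire roughly as sketched below. This layout is an illustrative aside, not part of the patch; the field values assume arp_create()'s usual defaults of a broadcast destination when dest_hw is NULL and a zeroed target hardware address when target_hw is NULL.

/* Illustrative sketch only: on-wire layout of one tagged gratuitous ARP. */
struct vlan_grat_arp_frame {
	unsigned char	dst_mac[6];	/* ff:ff:ff:ff:ff:ff (dest_hw == NULL)     */
	unsigned char	src_mac[6];	/* current active slave's dev_addr         */
	unsigned short	vlan_proto;	/* 0x8100, inserted by vlan_put_tag()      */
	unsigned short	vlan_tci;	/* priority 0, VID == vlan->vlan_id        */
	unsigned short	encap_proto;	/* 0x0806 (ETH_P_ARP)                      */
	unsigned short	ar_hrd;		/* 1 (Ethernet)                            */
	unsigned short	ar_pro;		/* 0x0800 (IPv4)                           */
	unsigned char	ar_hln;		/* 6                                       */
	unsigned char	ar_pln;		/* 4                                       */
	unsigned short	ar_op;		/* 2 (ARPOP_REPLY)                         */
	unsigned char	ar_sha[6];	/* slave's dev_addr                        */
	unsigned char	ar_sip[4];	/* bond->master_ip or vlan->vlan_ip        */
	unsigned char	ar_tha[6];	/* zeroed (target_hw == NULL)              */
	unsigned char	ar_tip[4];	/* same IP as ar_sip (gratuitous ARP)      */
} __attribute__((packed));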
@@ -2781,7 +2952,7 @@ static void bond_loadbalance_arp_mon(struct net_device *bond_dev)
 		 */
 		if (((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) ||
 		    (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) &&
-		     my_ip)) {
+		     bond_has_ip(bond))) {
 
 			slave->link = BOND_LINK_DOWN;
 			slave->state = BOND_STATE_BACKUP;
@@ -2920,7 +3091,7 @@ static void bond_activebackup_arp_mon(struct net_device *bond_dev)
 		if ((slave != bond->curr_active_slave) &&
 		    (!bond->current_arp_slave) &&
 		    (((jiffies - slave->dev->last_rx) >= 3*delta_in_ticks) &&
-		     my_ip)) {
+		     bond_has_ip(bond))) {
 			/* a backup slave has gone down; three times
 			 * the delta allows the current slave to be
 			 * taken out before the backup slave.
@@ -2966,8 +3137,8 @@ static void bond_activebackup_arp_mon(struct net_device *bond_dev)
 		 * if it is up and needs to take over as the curr_active_slave
 		 */
 		if ((((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) ||
-		     (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) &&
-		      my_ip)) &&
+		     (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) &&
+		      bond_has_ip(bond))) &&
 		    ((jiffies - slave->jiffies) >= 2*delta_in_ticks)) {
 
 			slave->link = BOND_LINK_DOWN;
@@ -3019,7 +3190,7 @@ static void bond_activebackup_arp_mon(struct net_device *bond_dev)
 	/* the current slave must tx an arp to ensure backup slaves
 	 * rx traffic
 	 */
-	if (slave && my_ip) {
+	if (slave && bond_has_ip(bond)) {
 		bond_arp_send_all(bond, slave);
 	}
 }
@@ -3471,10 +3642,67 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
 	return NOTIFY_DONE;
 }
 
+/*
+ * bond_inetaddr_event: handle inetaddr notifier chain events.
+ *
+ * We keep track of device IPs primarily to use as source addresses in
+ * ARP monitor probes (rather than spewing out broadcasts all the time).
+ *
+ * We track one IP for the main device (if it has one), plus one per VLAN.
+ */
+static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = ptr;
+	struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
+	struct bonding *bond, *bond_next;
+	struct vlan_entry *vlan, *vlan_next;
+
+	list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
+		if (bond->dev == event_dev) {
+			switch (event) {
+			case NETDEV_UP:
+				bond->master_ip = ifa->ifa_local;
+				return NOTIFY_OK;
+			case NETDEV_DOWN:
+				bond->master_ip = bond_glean_dev_ip(bond->dev);
+				return NOTIFY_OK;
+			default:
+				return NOTIFY_DONE;
+			}
+		}
+
+		if (list_empty(&bond->vlan_list))
+			continue;
+
+		list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
+					 vlan_list) {
+			vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+			if (vlan_dev == event_dev) {
+				switch (event) {
+				case NETDEV_UP:
+					vlan->vlan_ip = ifa->ifa_local;
+					return NOTIFY_OK;
+				case NETDEV_DOWN:
+					vlan->vlan_ip =
+						bond_glean_dev_ip(vlan_dev);
+					return NOTIFY_OK;
+				default:
+					return NOTIFY_DONE;
+				}
+			}
+		}
+	}
+	return NOTIFY_DONE;
+}
+
 static struct notifier_block bond_netdev_notifier = {
 	.notifier_call = bond_netdev_event,
 };
 
+static struct notifier_block bond_inetaddr_notifier = {
+	.notifier_call = bond_inetaddr_event,
+};
+
 /*-------------------------- Packet type handling ---------------------------*/
 
 /* register to receive lacpdus on a bond */
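The address tracking added above rides on the standard inetaddr notifier chain: register a notifier_block, and the callback is handed a struct in_ifaddr along with NETDEV_UP and NETDEV_DOWN events as IPv4 addresses are added or removed. A minimal stand-alone sketch of that pattern, using the same era-appropriate APIs as the patch (the module name "ipwatch" and the log text are illustrative only, not from the patch):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Called whenever an IPv4 address is added to or removed from any device. */
static int ipwatch_inetaddr_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_INFO "ipwatch: %s gained %u.%u.%u.%u\n",
		       dev->name, NIPQUAD(ifa->ifa_local));
		break;
	case NETDEV_DOWN:
		printk(KERN_INFO "ipwatch: %s lost %u.%u.%u.%u\n",
		       dev->name, NIPQUAD(ifa->ifa_local));
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block ipwatch_notifier = {
	.notifier_call = ipwatch_inetaddr_event,
};

static int __init ipwatch_init(void)
{
	return register_inetaddr_notifier(&ipwatch_notifier);
}

static void __exit ipwatch_exit(void)
{
	unregister_inetaddr_notifier(&ipwatch_notifier);
}

module_init(ipwatch_init);
module_exit(ipwatch_exit);
MODULE_LICENSE("GPL");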
@@ -4060,17 +4288,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 	struct bonding *bond = bond_dev->priv;
 	int res = 1;
 
-	/* if we are sending arp packets, try to at least
-	   identify our own ip address */
-	if (bond->params.arp_interval && !my_ip &&
-	    (skb->protocol == __constant_htons(ETH_P_ARP))) {
-		char *the_ip = (char *)skb->data +
-			       sizeof(struct ethhdr) +
-			       sizeof(struct arphdr) +
-			       ETH_ALEN;
-		memcpy(&my_ip, the_ip, 4);
-	}
-
 	read_lock(&bond->lock);
 	read_lock(&bond->curr_slave_lock);
 
@@ -4669,6 +4886,7 @@ static int __init bonding_init(void)
 
 	rtnl_unlock();
 	register_netdevice_notifier(&bond_netdev_notifier);
+	register_inetaddr_notifier(&bond_inetaddr_notifier);
 
 	return 0;
 
@@ -4684,6 +4902,7 @@ out_err:
 static void __exit bonding_exit(void)
 {
 	unregister_netdevice_notifier(&bond_netdev_notifier);
+	unregister_inetaddr_notifier(&bond_inetaddr_notifier);
 
 	rtnl_lock();
 	bond_free_all();