@@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 
-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
 		return NOTIFY_DONE;
 
 	/* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
 {
 	struct pktgen_thread *t;
 	struct list_head *q, *n;
+	LIST_HEAD(list);
 
 	/* Stop all interfaces & threads */
 	pktgen_exiting = true;
 
-	list_for_each_safe(q, n, &pktgen_threads) {
+	mutex_lock(&pktgen_thread_lock);
+	list_splice_init(&pktgen_threads, &list);
+	mutex_unlock(&pktgen_thread_lock);
+
+	list_for_each_safe(q, n, &list) {
 		t = list_entry(q, struct pktgen_thread, th_list);
+		list_del(&t->th_list);
 		kthread_stop(t->tsk);
 		kfree(t);
 	}