@@ -115,38 +115,34 @@ static void net_free(struct net *net)
 	kmem_cache_free(net_cachep, net);
 }
 
-struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+static struct net *net_create(void)
 {
-	struct net *new_net = NULL;
-	int err;
-
-	if (!(flags & CLONE_NEWNET))
-		return get_net(old_net);
-
-	err = -ENOMEM;
-	new_net = net_alloc();
-	if (!new_net)
-		goto out_err;
+	struct net *net;
+	int rv;
 
+	net = net_alloc();
+	if (!net)
+		return ERR_PTR(-ENOMEM);
 	mutex_lock(&net_mutex);
-	err = setup_net(new_net);
-	if (!err) {
+	rv = setup_net(net);
+	if (rv == 0) {
 		rtnl_lock();
-		list_add_tail(&new_net->list, &net_namespace_list);
+		list_add_tail(&net->list, &net_namespace_list);
 		rtnl_unlock();
 	}
 	mutex_unlock(&net_mutex);
+	if (rv < 0) {
+		net_free(net);
+		return ERR_PTR(rv);
+	}
+	return net;
+}
 
-	if (err)
-		goto out_free;
-out:
-	return new_net;
-
-out_free:
-	net_free(new_net);
-out_err:
-	new_net = ERR_PTR(err);
-	goto out;
+struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+{
+	if (!(flags & CLONE_NEWNET))
+		return get_net(old_net);
+	return net_create();
 }
 
 static void cleanup_net(struct work_struct *work)
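
With this change, copy_net_ns() either reuses the old namespace via get_net() or returns whatever net_create() produced: a referenced struct net on success, or an ERR_PTR()-encoded errno on failure. A minimal caller-side sketch of that convention follows, assuming only the standard IS_ERR()/PTR_ERR() helpers from <linux/err.h>; the example_copy_net() wrapper and its parameters are hypothetical and not part of the patch.

/*
 * Hypothetical caller sketch: copy_net_ns() never returns NULL here, so the
 * result is checked with IS_ERR()/PTR_ERR() rather than a NULL test.
 */
#include <linux/err.h>

static int example_copy_net(unsigned long flags, struct net *old_net,
			    struct net **new_net)
{
	struct net *net;

	net = copy_net_ns(flags, old_net);
	if (IS_ERR(net))
		return PTR_ERR(net);	/* net_alloc() or setup_net() failed */

	*new_net = net;			/* caller now owns this reference */
	return 0;
}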