net_namespace.c

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static int net_assign_generic(struct net *net, int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&net_mutex));
        ng = old_ng;
        if (old_ng->len >= id)
                goto assign;

        ng = kzalloc(sizeof(struct net_generic) +
                     id * sizeof(void *), GFP_KERNEL);
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * The net_generic explores the net->gen array inside rcu
         * read section. Besides once set the net->gen->ptr[x]
         * pointer never changes (see rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        ng->len = id;
        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, rcu);
assign:
        ng->ptr[id - 1] = data;
        return 0;
}
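
/*
 * Illustrative reader-side sketch: per-net data stored via
 * net_assign_generic() is normally looked up with net_generic(), which
 * dereferences net->gen under RCU as described in the comment above.
 * "foo_net_id" and "struct foo_net" are hypothetical names used only for
 * this example; only net_generic() itself is real kernel API.
 *
 *	static int foo_net_id;
 *
 *	struct foo_net {
 *		int some_state;
 *	};
 *
 *	static void foo_use(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_state++;	// slot pointer never changes once set
 *	}
 */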

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err;
        if (ops->id && ops->size) {
                void *data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                err = net_assign_generic(net, *ops->id, data);
                if (err) {
                        kfree(data);
                        return err;
                }
        }
        if (ops->init)
                return ops->init(net);
        return 0;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                int id = *ops->id;
                kfree(net_generic(net, id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
        /* Must be called with net_mutex held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        atomic_set(&net->count, 1);
        atomic_set(&net->passive, 1);
        net->dev_base_seq = 1;

#ifdef NETNS_REFCNT_DEBUG
        atomic_set(&net->use_count, 0);
#endif

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        size_t generic_size = sizeof(struct net_generic) +
                              INITIAL_NET_GEN_PTRS * sizeof(void *);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->len = INITIAL_NET_GEN_PTRS;

        return ng;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
        if (unlikely(atomic_read(&net->use_count) != 0)) {
                printk(KERN_EMERG "network namespace not free! Usage: %d\n",
                        atomic_read(&net->use_count));
                return;
        }
#endif
        kfree(net->gen);
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;
        if (ns && atomic_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        net = net_alloc();
        if (!net)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&net_mutex);
        rv = setup_net(net);
        if (rv == 0) {
                rtnl_lock();
                list_add_tail_rcu(&net->list, &net_namespace_list);
                rtnl_unlock();
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                net_drop_ns(net);
                return ERR_PTR(rv);
        }
        return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp;
        LIST_HEAD(net_kill_list);
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
        }
        rtnl_unlock();

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                net_drop_ns(net);
        }
}

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
        struct proc_inode *ei;
        struct file *file;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ei = PROC_I(file->f_dentry->d_inode);
        if (ei->ns_ops == &netns_operations)
                net = get_net(ei->ns);
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        if (flags & CLONE_NEWNET)
                return ERR_PTR(-EINVAL);
        return old_net;
}

struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                nsproxy = task_nsproxy(tsk);
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net))
                panic("Could not setup the initial network namespace");

        rtnl_lock();
        list_add_tail_rcu(&init_net.list, &net_namespace_list);
        rtnl_unlock();

        mutex_unlock(&net_mutex);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        int err = 0;
        err = ops_init(ops, &init_net);
        if (err)
                ops_free(ops, &init_net);
        return err;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        LIST_HEAD(net_exit_list);
        list_add(&init_net.exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, 1, ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
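
/*
 * Illustrative sketch of a minimal pernet subsystem registration.  The
 * names "foo_net_init", "foo_net_exit" and "foo_net_ops" are hypothetical;
 * the pernet_operations fields and the register call are real kernel API.
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		return 0;	// set up per-namespace state here
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		// tear down per-namespace state here
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *	};
 *
 *	// from the subsystem's initcall or module init:
 *	// err = register_pernet_subsys(&foo_net_ops);
 */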

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
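
/*
 * Illustrative sketch of registering per-net state through the ->id/->size
 * fields, so that ops_init() above allocates a zeroed "struct bar_net" for
 * every namespace and net_generic() retrieves it.  "bar_net", "bar_net_id"
 * and "bar_net_ops" are hypothetical names used only for this example.
 *
 *	struct bar_net {
 *		struct list_head devices;
 *	};
 *
 *	static int bar_net_id;
 *
 *	static int __net_init bar_net_init(struct net *net)
 *	{
 *		struct bar_net *bn = net_generic(net, bar_net_id);
 *
 *		INIT_LIST_HEAD(&bn->devices);
 *		return 0;
 *	}
 *
 *	static struct pernet_operations bar_net_ops = {
 *		.init = bar_net_init,
 *		.id   = &bar_net_id,
 *		.size = sizeof(struct bar_net),
 *	};
 *
 *	// err = register_pernet_device(&bar_net_ops);
 */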

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static void *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        rcu_read_lock();
        nsproxy = task_nsproxy(task);
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        rcu_read_unlock();

        return net;
}

static void netns_put(void *ns)
{
        put_net(ns);
}

static int netns_install(struct nsproxy *nsproxy, void *ns)
{
        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(ns);
        return 0;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
};
#endif
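
/*
 * Illustrative userspace sketch: netns_operations above is what lets
 * setns(2) switch a task's network namespace.  A sufficiently privileged
 * process (CAP_SYS_ADMIN) opens another task's /proc/<pid>/ns/net file and
 * passes the fd to setns(); the kernel finds the proc_ns_operations bound
 * to that fd and invokes its ->install hook, netns_install() here.  Error
 * handling is trimmed for brevity; the helper name is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <fcntl.h>
 *
 *	int join_net_ns(const char *path)	// e.g. "/proc/1234/ns/net"
 *	{
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (setns(fd, CLONE_NEWNET) < 0)
 *			return -1;
 *		return fd;
 *	}
 */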