net_namespace.c

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err;
        if (ops->id && ops->size) {
                void *data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                err = net_assign_generic(net, *ops->id, data);
                if (err) {
                        kfree(data);
                        return err;
                }
        }
        if (ops->init)
                return ops->init(net);
        return 0;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                int id = *ops->id;
                kfree(net_generic(net, id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
        /* Must be called with net_mutex held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        atomic_set(&net->count, 1);

#ifdef NETNS_REFCNT_DEBUG
        atomic_set(&net->use_count, 0);
#endif

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}
static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        size_t generic_size = sizeof(struct net_generic) +
                INITIAL_NET_GEN_PTRS * sizeof(void *);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->len = INITIAL_NET_GEN_PTRS;

        return ng;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
        if (unlikely(atomic_read(&net->use_count) != 0)) {
                printk(KERN_EMERG "network namespace not free! Usage: %d\n",
                       atomic_read(&net->use_count));
                return;
        }
#endif
        kfree(net->gen);
        kmem_cache_free(net_cachep, net);
}

static struct net *net_create(void)
{
        struct net *net;
        int rv;

        net = net_alloc();
        if (!net)
                return ERR_PTR(-ENOMEM);
        mutex_lock(&net_mutex);
        rv = setup_net(net);
        if (rv == 0) {
                rtnl_lock();
                list_add_tail_rcu(&net->list, &net_namespace_list);
                rtnl_unlock();
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                net_free(net);
                return ERR_PTR(rv);
        }
        return net;
}

struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);
        return net_create();
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp;
        LIST_HEAD(net_kill_list);
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
        }
        rtnl_unlock();

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                net_free(net);
        }
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        if (flags & CLONE_NEWNET)
                return ERR_PTR(-EINVAL);
        return old_net;
}
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                nsproxy = task_nsproxy(tsk);
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
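/*
 * Illustrative sketch, not part of the original file and guarded by #if 0:
 * how a caller is expected to hold and drop a namespace reference. The
 * helper my_use_netns_of() is hypothetical; the point is that the final
 * put_net() lands in __put_net() above, which defers the real teardown to
 * cleanup_net() running on the netns workqueue.
 */
#if 0
static int my_use_netns_of(pid_t pid)
{
        struct net *net = get_net_ns_by_pid(pid);      /* takes a reference */

        if (IS_ERR(net))
                return PTR_ERR(net);

        /* ... use the namespace ... */

        put_net(net);   /* dropping the last reference ends up in __put_net() */
        return 0;
}
#endif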
static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net))
                panic("Could not setup the initial network namespace");

        rtnl_lock();
        list_add_tail_rcu(&init_net.list, &net_namespace_list);
        rtnl_unlock();

        mutex_unlock(&net_mutex);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        int err = 0;
        err = ops_init(ops, &init_net);
        if (err)
                ops_free(ops, &init_net);
        return err;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        LIST_HEAD(net_exit_list);
        list_add(&init_net.exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
again:
                error = ida_get_new_above(&net_generic_ids, 1, ops->id);
                if (error < 0) {
                        if (error == -EAGAIN) {
                                ida_pre_get(&net_generic_ids, GFP_KERNEL);
                                goto again;
                        }
                        return error;
                }
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_remove(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_remove(&net_generic_ids, *ops->id);
}
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
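/*
 * Illustrative sketch, not part of the original file and guarded by #if 0:
 * a minimal subsystem registration. The my_subsys_* names are hypothetical.
 * Once register_pernet_subsys() returns, my_subsys_init() has run for every
 * existing namespace and will run for each namespace created later;
 * my_subsys_exit() runs when a namespace is destroyed.
 */
#if 0
static __net_init int my_subsys_init(struct net *net)
{
        /* allocate or initialize per-namespace state here */
        return 0;
}

static __net_exit void my_subsys_exit(struct net *net)
{
        /* undo whatever my_subsys_init() did */
}

static struct pernet_operations my_subsys_ops = {
        .init = my_subsys_init,
        .exit = my_subsys_exit,
};

static int __init my_subsys_module_init(void)
{
        return register_pernet_subsys(&my_subsys_ops);
}
#endif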
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @module: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *module)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(module);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
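/*
 * Illustrative sketch, not part of the original file and guarded by #if 0:
 * a pernet device that lets the id/size mechanism manage its per-namespace
 * state. Because .id and .size are set, ops_init() kzalloc()s a
 * struct my_dev_state and stores it with net_assign_generic() before
 * my_dev_init() runs, so the init hook can fetch it via net_generic().
 * The my_dev_* names are hypothetical.
 */
#if 0
struct my_dev_state {
        int configured;
};

static int my_dev_net_id;

static __net_init int my_dev_init(struct net *net)
{
        struct my_dev_state *state = net_generic(net, my_dev_net_id);

        state->configured = 0;
        return 0;
}

static struct pernet_operations my_dev_ops = {
        .init = my_dev_init,
        .id   = &my_dev_net_id,
        .size = sizeof(struct my_dev_state),
};

/* register_pernet_device(&my_dev_ops) would be called from module init */
#endif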
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
static void net_generic_release(struct rcu_head *rcu)
{
        struct net_generic *ng;

        ng = container_of(rcu, struct net_generic, rcu);
        kfree(ng);
}

int net_assign_generic(struct net *net, int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);

        ng = old_ng = net->gen;
        if (old_ng->len >= id)
                goto assign;

        ng = kzalloc(sizeof(struct net_generic) +
                     id * sizeof(void *), GFP_KERNEL);
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() reads the net->gen array inside an rcu read
         * section. Besides, once set, the net->gen->ptr[x] pointer
         * never changes (see rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        ng->len = id;
        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

        rcu_assign_pointer(net->gen, ng);
        call_rcu(&old_ng->rcu, net_generic_release);
assign:
        ng->ptr[id - 1] = data;
        return 0;
}
EXPORT_SYMBOL_GPL(net_assign_generic);
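/*
 * Illustrative sketch, not part of the original file and guarded by #if 0:
 * a reader looking up a pointer stored through net_assign_generic().
 * net_generic() dereferences net->gen under RCU, which is why the resized
 * array above is published with rcu_assign_pointer() and the old copy is
 * only freed after a grace period. my_dev_net_id and struct my_dev_state
 * are the hypothetical names from the register_pernet_device() sketch above.
 */
#if 0
static void my_dev_mark_configured(struct net *net)
{
        struct my_dev_state *state = net_generic(net, my_dev_net_id);

        /* ng->ptr[id - 1] never changes once assigned, so state is stable */
        state->configured = 1;
}
#endif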