net_namespace.c

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;

	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net);
	return 0;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
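/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * exit_batch lets a subsystem tear down an entire batch of dying
 * namespaces in one pass, paying for expensive synchronization once per
 * batch instead of once per namespace.
 *
 *	static void __net_exit foo_exit_batch(struct list_head *net_exit_list)
 *	{
 *		struct net *net;
 *
 *		rtnl_lock();
 *		list_for_each_entry(net, net_exit_list, exit_list)
 *			foo_destroy_all_in_net(net);	// hypothetical helper
 *		rtnl_unlock();
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.exit_batch = foo_exit_batch,
 *	};
 */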
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
#ifdef NETNS_REFCNT_DEBUG
	atomic_set(&net->use_count, 0);
#endif

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
}
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}
static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
#endif
	kfree(net->gen);
	kmem_cache_free(net_cachep, net);
}
static struct net *net_create(void)
{
	struct net *net;
	int rv;

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
	mutex_lock(&net_mutex);
	rv = setup_net(net);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		net_free(net);
		return ERR_PTR(rv);
	}
	return net;
}

struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);
	return net_create();
}
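/*
 * Usage sketch (illustrative only): copy_net_ns() is reached from the
 * nsproxy code when a task clones or unshares with CLONE_NEWNET.  The
 * lines below mirror that call pattern rather than quoting
 * kernel/nsproxy.c verbatim.
 *
 *	new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
 *	if (IS_ERR(new_nsp->net_ns)) {
 *		err = PTR_ERR(new_nsp->net_ns);
 *		goto out_net;
 *	}
 */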
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		net_free(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
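/*
 * Usage sketch (illustrative only; "foo" is hypothetical): callers never
 * invoke __put_net() directly.  They pin a namespace with get_net() and
 * release it with put_net(), which only lands here when the last
 * reference goes away and the deferred teardown must be queued.
 *
 *	struct net *net = get_net(dev_net(dev));	// pin across deferred work
 *	...
 *	schedule_foo_work(net);				// hypothetical deferred user
 *	...
 *	put_net(net);		// may queue cleanup_net() via __put_net()
 */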
#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}
#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
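/*
 * Usage sketch (illustrative only): on success the returned namespace
 * carries a reference taken by get_net() above, so callers must balance
 * it with put_net() once they are done.
 *
 *	struct net *net = get_net_ns_by_pid(pid);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...			// use the namespace
 *	put_net(net);
 */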
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	int err = 0;

	err = ops_init(ops, &init_net);
	if (err)
		ops_free(ops, &init_net);
	return err;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);

	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
	}
	error = __register_pernet_operations(list, ops);
	if (error && ops->id)
		ida_remove(&net_generic_ids, *ops->id);

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
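/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * a minimal subsystem with per-namespace state.  Because .id and .size
 * are set, ops_init() above kzalloc()s the state and hangs it off
 * net->gen before .init runs, and net_generic() retrieves it afterwards.
 *
 *	struct foo_net {
 *		int sysctl_foo_enabled;
 *	};
 *
 *	static int foo_net_id;
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->sysctl_foo_enabled = 1;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		// per-net teardown; the foo_net itself is freed by ops_free()
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	// module init:	err = register_pernet_subsys(&foo_net_ops);
 *	// module exit:	unregister_pernet_subsys(&foo_net_ops);
 */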
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
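/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * a virtual device driver registers its pernet ops with
 * register_pernet_device() so they land at the tail of pernet_list,
 * after first_device; since teardown walks the list in reverse, its
 * exit method runs before the subsystem exit methods when a namespace
 * dies.
 *
 *	static void __net_exit foo_dev_exit_net(struct net *net)
 *	{
 *		// unregister any foo devices that live in this namespace
 *	}
 *
 *	static struct pernet_operations foo_dev_net_ops = {
 *		.exit = foo_dev_exit_net,
 *	};
 *
 *	// err = register_pernet_device(&foo_dev_net_ops);
 */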
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
static void net_generic_release(struct rcu_head *rcu)
{
	struct net_generic *ng;

	ng = container_of(rcu, struct net_generic, rcu);
	kfree(ng);
}

int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	ng = old_ng = net->gen;
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() walks the net->gen array inside an rcu read
	 * section.  Besides, once set, a net->gen->ptr[x] pointer never
	 * changes (see the rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	call_rcu(&old_ng->rcu, net_generic_release);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
EXPORT_SYMBOL_GPL(net_assign_generic);
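/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * the read side of this array is net_generic(), which per-net hooks and
 * fast paths use once the slot for their id has been populated.
 *
 *	static int foo_net_id;			// slot assigned at register time
 *
 *	static struct foo_net *foo_pernet(struct net *net)
 *	{
 *		return net_generic(net, foo_net_id);
 *	}
 */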