net_namespace.c

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
static void unregister_netdevices(struct net *net, struct list_head *list)
{
        struct net_device *dev;
        /* At exit all network devices must be removed from a network
         * namespace.  Do this in the reverse order of registration.
         */
        for_each_netdev_reverse(net, dev) {
                if (dev->rtnl_link_ops)
                        dev->rtnl_link_ops->dellink(dev, list);
                else
                        unregister_netdevice_queue(dev, list);
        }
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
        /* Must be called with net_mutex held */
        struct pernet_operations *ops;
        int error = 0;

        atomic_set(&net->count, 1);
#ifdef NETNS_REFCNT_DEBUG
        atomic_set(&net->use_count, 0);
#endif

        list_for_each_entry(ops, &pernet_list, list) {
                if (ops->init) {
                        error = ops->init(net);
                        if (error < 0)
                                goto out_undo;
                }
        }
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
                if (ops->exit)
                        ops->exit(net);
                if (&ops->list == first_device) {
                        LIST_HEAD(dev_kill_list);
                        rtnl_lock();
                        unregister_netdevices(net, &dev_kill_list);
                        unregister_netdevice_many(&dev_kill_list);
                        rtnl_unlock();
                }
        }

        rcu_barrier();
        goto out;
}
static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        size_t generic_size = sizeof(struct net_generic) +
                INITIAL_NET_GEN_PTRS * sizeof(void *);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->len = INITIAL_NET_GEN_PTRS;

        return ng;
}
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
        if (unlikely(atomic_read(&net->use_count) != 0)) {
                printk(KERN_EMERG "network namespace not free! Usage: %d\n",
                        atomic_read(&net->use_count));
                return;
        }
#endif
        kfree(net->gen);
        kmem_cache_free(net_cachep, net);
}
static struct net *net_create(void)
{
        struct net *net;
        int rv;

        net = net_alloc();
        if (!net)
                return ERR_PTR(-ENOMEM);
        mutex_lock(&net_mutex);
        rv = setup_net(net);
        if (rv == 0) {
                rtnl_lock();
                list_add_tail_rcu(&net->list, &net_namespace_list);
                rtnl_unlock();
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
                net_free(net);
                return ERR_PTR(rv);
        }
        return net;
}

struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);
        return net_create();
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
        struct pernet_operations *ops;
        struct net *net, *tmp;
        LIST_HEAD(net_kill_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        spin_lock_irq(&cleanup_list_lock);
        list_replace_init(&cleanup_list, &net_kill_list);
        spin_unlock_irq(&cleanup_list_lock);

        mutex_lock(&net_mutex);

        /* Don't let anyone else find us. */
        rtnl_lock();
        list_for_each_entry(net, &net_kill_list, cleanup_list)
                list_del_rcu(&net->list);
        rtnl_unlock();

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list) {
                if (ops->exit) {
                        list_for_each_entry(net, &net_kill_list, cleanup_list)
                                ops->exit(net);
                }
                if (&ops->list == first_device) {
                        LIST_HEAD(dev_kill_list);
                        rtnl_lock();
                        list_for_each_entry(net, &net_kill_list, cleanup_list)
                                unregister_netdevices(net, &dev_kill_list);
                        unregister_netdevice_many(&dev_kill_list);
                        rtnl_unlock();
                }
        }
        mutex_unlock(&net_mutex);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_kill_list, cleanup_list) {
                list_del_init(&net->cleanup_list);
                net_free(net);
        }
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        unsigned long flags;

        spin_lock_irqsave(&cleanup_list_lock, flags);
        list_add(&net->cleanup_list, &cleanup_list);
        spin_unlock_irqrestore(&cleanup_list_lock, flags);

        queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
        if (flags & CLONE_NEWNET)
                return ERR_PTR(-EINVAL);
        return old_net;
}
#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                nsproxy = task_nsproxy(tsk);
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        mutex_lock(&net_mutex);
        if (setup_net(&init_net))
                panic("Could not setup the initial network namespace");

        rtnl_lock();
        list_add_tail_rcu(&init_net.list, &net_namespace_list);
        rtnl_unlock();

        mutex_unlock(&net_mutex);

        return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        struct net *net, *undo_net;
        int error;

        list_add_tail(&ops->list, list);
        if (ops->init) {
                for_each_net(net) {
                        error = ops->init(net);
                        if (error)
                                goto out_undo;
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        if (ops->exit) {
                for_each_net(undo_net) {
                        if (net_eq(undo_net, net))
                                goto undone;
                        ops->exit(undo_net);
                }
        }
undone:
        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;

        list_del(&ops->list);
        if (ops->exit)
                for_each_net(net)
                        ops->exit(net);
}

#else

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        if (ops->init == NULL)
                return 0;
        return ops->init(&init_net);
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        if (ops->exit)
                ops->exit(&init_net);
}

#endif
static DEFINE_IDA(net_generic_ids);

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(first_device, ops);
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
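
/*
 * Illustrative sketch (not part of the original file): how a module would
 * typically use register_pernet_subsys().  The names foo_net_init(),
 * foo_net_exit(), foo_net_ops, foo_module_init() and foo_module_exit()
 * below are hypothetical.  The init method is run for every namespace that
 * already exists at registration time and for each namespace created
 * afterwards; the exit method runs when a namespace is destroyed and when
 * the operations are unregistered.
 *
 *    static int __net_init foo_net_init(struct net *net)
 *    {
 *            // set up per-namespace state for 'net'
 *            return 0;
 *    }
 *
 *    static void __net_exit foo_net_exit(struct net *net)
 *    {
 *            // release per-namespace state for 'net'
 *    }
 *
 *    static struct pernet_operations foo_net_ops = {
 *            .init = foo_net_init,
 *            .exit = foo_net_exit,
 *    };
 *
 *    static int __init foo_module_init(void)
 *    {
 *            return register_pernet_subsys(&foo_net_ops);
 *    }
 *
 *    static void __exit foo_module_exit(void)
 *    {
 *            unregister_pernet_subsys(&foo_net_ops);
 *    }
 *
 *    module_init(foo_module_init);
 *    module_exit(foo_module_exit);
 */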
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
{
        int rv;

        mutex_lock(&net_mutex);
again:
        rv = ida_get_new_above(&net_generic_ids, 1, id);
        if (rv < 0) {
                if (rv == -EAGAIN) {
                        ida_pre_get(&net_generic_ids, GFP_KERNEL);
                        goto again;
                }
                goto out;
        }
        rv = register_pernet_operations(first_device, ops);
        if (rv < 0)
                ida_remove(&net_generic_ids, *id);
out:
        mutex_unlock(&net_mutex);
        return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);

void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        unregister_pernet_operations(ops);
        ida_remove(&net_generic_ids, id);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
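
/*
 * Illustrative sketch (not part of the original file): drivers that create
 * a net_device per namespace register here rather than through
 * register_pernet_subsys(), so their operations sit at the tail of
 * pernet_list.  During cleanup_net() the exit methods run in reverse order,
 * and once the walk reaches first_device every remaining net_device in the
 * dying namespace is unregistered before any subsystem exit method runs.
 * The names bar_dev_init(), bar_dev_ops and bar_driver_init() are
 * hypothetical.
 *
 *    static int __net_init bar_dev_init(struct net *net)
 *    {
 *            // register a default net_device in the new namespace 'net'
 *            // (a loopback-style device, for example)
 *            return 0;
 *    }
 *
 *    static struct pernet_operations bar_dev_ops = {
 *            .init = bar_dev_init,
 *            // no .exit needed if the device is torn down by the generic
 *            // unregister_netdevices() sweep in cleanup_net()
 *    };
 *
 *    static int __init bar_driver_init(void)
 *    {
 *            return register_pernet_device(&bar_dev_ops);
 *    }
 */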
int register_pernet_gen_device(int *id, struct pernet_operations *ops)
{
        int error;
        mutex_lock(&net_mutex);
again:
        error = ida_get_new_above(&net_generic_ids, 1, id);
        if (error) {
                if (error == -EAGAIN) {
                        ida_pre_get(&net_generic_ids, GFP_KERNEL);
                        goto again;
                }
                goto out;
        }
        error = register_pernet_operations(&pernet_list, ops);
        if (error)
                ida_remove(&net_generic_ids, *id);
        else if (first_device == &pernet_list)
                first_device = &ops->list;
out:
        mutex_unlock(&net_mutex);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_device);
/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
{
        mutex_lock(&net_mutex);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        ida_remove(&net_generic_ids, id);
        mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);
static void net_generic_release(struct rcu_head *rcu)
{
        struct net_generic *ng;

        ng = container_of(rcu, struct net_generic, rcu);
        kfree(ng);
}

int net_assign_generic(struct net *net, int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(!mutex_is_locked(&net_mutex));
        BUG_ON(id == 0);

        ng = old_ng = net->gen;
        if (old_ng->len >= id)
                goto assign;

        ng = kzalloc(sizeof(struct net_generic) +
                        id * sizeof(void *), GFP_KERNEL);
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() explores the net->gen array inside an rcu
         * read-side section.  Besides, once set, the net->gen->ptr[x]
         * pointer never changes (see rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        ng->len = id;
        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

        rcu_assign_pointer(net->gen, ng);
        call_rcu(&old_ng->rcu, net_generic_release);
assign:
        ng->ptr[id - 1] = data;
        return 0;
}
EXPORT_SYMBOL_GPL(net_assign_generic);
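
/*
 * Illustrative sketch (not part of the original file): the intended use of
 * the generic pointer array managed by net_assign_generic().  A module
 * reserves a slot id via register_pernet_gen_subsys() (or the _device
 * variant), publishes its per-namespace data from its init method while
 * net_mutex is held, and later looks it up with net_generic() from
 * <net/netns/generic.h>.  The names baz_net_id, struct baz_net,
 * baz_net_init(), baz_net_ops and baz_module_init() are hypothetical.
 *
 *    static int baz_net_id;
 *
 *    static int __net_init baz_net_init(struct net *net)
 *    {
 *            struct baz_net *bn;
 *            int err;
 *
 *            bn = kzalloc(sizeof(*bn), GFP_KERNEL);
 *            if (!bn)
 *                    return -ENOMEM;
 *            // init methods run with net_mutex held, as net_assign_generic()
 *            // requires (see the BUG_ON above)
 *            err = net_assign_generic(net, baz_net_id, bn);
 *            if (err)
 *                    kfree(bn);
 *            return err;
 *    }
 *
 *    static struct pernet_operations baz_net_ops = {
 *            .init = baz_net_init,
 *    };
 *
 *    static int __init baz_module_init(void)
 *    {
 *            return register_pernet_gen_subsys(&baz_net_id, &baz_net_ops);
 *    }
 *
 *    // later, in fast-path code:
 *    //         struct baz_net *bn = net_generic(net, baz_net_id);
 */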