/* pid_namespace.c */
  1. /*
  2. * Pid namespaces
  3. *
  4. * Authors:
  5. * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
  6. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
  7. * Many thanks to Oleg Nesterov for comments and help
  8. *
  9. */
  10. #include <linux/pid.h>
  11. #include <linux/pid_namespace.h>
  12. #include <linux/syscalls.h>
  13. #include <linux/err.h>
  14. #include <linux/acct.h>
  15. #include <linux/slab.h>
/* Number of pid bits tracked by one pidmap page. */
#define BITS_PER_PAGE (PAGE_SIZE*8)

/*
 * One slab cache per pid-namespace nesting depth: a struct pid at depth
 * d carries d struct upid entries, so each depth needs its own object
 * size.  Caches are created lazily and shared by all namespaces at the
 * same depth (see create_pid_cachep()).
 */
struct pid_cache {
	int nr_ids;			/* nesting depth this cache serves */
	char name[16];			/* slab name: "pid_<nr_ids>" */
	struct kmem_cache *cachep;	/* the cache itself */
	struct list_head list;		/* linkage on pid_caches_lh */
};

static LIST_HEAD(pid_caches_lh);		/* all pid_cache entries */
static DEFINE_MUTEX(pid_caches_mutex);		/* protects pid_caches_lh */
static struct kmem_cache *pid_ns_cachep;	/* slab for struct pid_namespace */
  26. /*
  27. * creates the kmem cache to allocate pids from.
  28. * @nr_ids: the number of numerical ids this pid will have to carry
  29. */
  30. static struct kmem_cache *create_pid_cachep(int nr_ids)
  31. {
  32. struct pid_cache *pcache;
  33. struct kmem_cache *cachep;
  34. mutex_lock(&pid_caches_mutex);
  35. list_for_each_entry(pcache, &pid_caches_lh, list)
  36. if (pcache->nr_ids == nr_ids)
  37. goto out;
  38. pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
  39. if (pcache == NULL)
  40. goto err_alloc;
  41. snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
  42. cachep = kmem_cache_create(pcache->name,
  43. sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
  44. 0, SLAB_HWCACHE_ALIGN, NULL);
  45. if (cachep == NULL)
  46. goto err_cachep;
  47. pcache->nr_ids = nr_ids;
  48. pcache->cachep = cachep;
  49. list_add(&pcache->list, &pid_caches_lh);
  50. out:
  51. mutex_unlock(&pid_caches_mutex);
  52. return pcache->cachep;
  53. err_cachep:
  54. kfree(pcache);
  55. err_alloc:
  56. mutex_unlock(&pid_caches_mutex);
  57. return NULL;
  58. }
  59. static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
  60. {
  61. struct pid_namespace *ns;
  62. unsigned int level = parent_pid_ns->level + 1;
  63. int i;
  64. ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
  65. if (ns == NULL)
  66. goto out;
  67. ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
  68. if (!ns->pidmap[0].page)
  69. goto out_free;
  70. ns->pid_cachep = create_pid_cachep(level + 1);
  71. if (ns->pid_cachep == NULL)
  72. goto out_free_map;
  73. kref_init(&ns->kref);
  74. ns->level = level;
  75. ns->parent = get_pid_ns(parent_pid_ns);
  76. set_bit(0, ns->pidmap[0].page);
  77. atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
  78. for (i = 1; i < PIDMAP_ENTRIES; i++)
  79. atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
  80. return ns;
  81. out_free_map:
  82. kfree(ns->pidmap[0].page);
  83. out_free:
  84. kmem_cache_free(pid_ns_cachep, ns);
  85. out:
  86. return ERR_PTR(-ENOMEM);
  87. }
  88. static void destroy_pid_namespace(struct pid_namespace *ns)
  89. {
  90. int i;
  91. for (i = 0; i < PIDMAP_ENTRIES; i++)
  92. kfree(ns->pidmap[i].page);
  93. kmem_cache_free(pid_ns_cachep, ns);
  94. }
  95. struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
  96. {
  97. if (!(flags & CLONE_NEWPID))
  98. return get_pid_ns(old_ns);
  99. if (flags & (CLONE_THREAD|CLONE_PARENT))
  100. return ERR_PTR(-EINVAL);
  101. return create_pid_namespace(old_ns);
  102. }
  103. void free_pid_ns(struct kref *kref)
  104. {
  105. struct pid_namespace *ns, *parent;
  106. ns = container_of(kref, struct pid_namespace, kref);
  107. parent = ns->parent;
  108. destroy_pid_namespace(ns);
  109. if (parent != NULL)
  110. put_pid_ns(parent);
  111. }
/*
 * Called when the last thread of a namespace's init thread group is
 * exiting: SIGKILL every remaining task in @pid_ns, then reap them all
 * before returning.
 */
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal and wait for
	 * them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those
	 * that belong to the same thread group.  To avoid that, we would
	 * have to walk the entire tasklist looking for processes in this
	 * namespace, but that could be unnecessarily expensive if the
	 * pid namespace has just a few processes.  Or we would need to
	 * maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	/* Start after pid 1: this namespace's init is the caller itself. */
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();
		/*
		 * Any nested-container's init processes won't ignore the
		 * SEND_SIG_NOINFO signal, see send_signal()->si_fromuser().
		 */
		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task)
			send_sig_info(SIGKILL, SEND_SIG_NOINFO, task);
		rcu_read_unlock();
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap everything we just killed.  Pending SIGKILL is cleared on
	 * each pass so sys_wait4() is not interrupted before all children
	 * have been collected (-ECHILD: no children left).
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}
/*
 * Boot-time setup: create the slab cache for struct pid_namespace.
 * SLAB_PANIC makes cache-creation failure fatal, so no error path is
 * needed here.
 */
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}

__initcall(pid_namespaces_init);