pid_namespace.c
  1. /*
  2. * Pid namespaces
  3. *
  4. * Authors:
  5. * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
  6. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
  7. * Many thanks to Oleg Nesterov for comments and help
  8. *
  9. */
  10. #include <linux/pid.h>
  11. #include <linux/pid_namespace.h>
  12. #include <linux/syscalls.h>
  13. #include <linux/err.h>
  14. #include <linux/acct.h>
  15. #define BITS_PER_PAGE (PAGE_SIZE*8)
/*
 * Descriptor for one slab cache of "struct pid" objects.  One pid_cache
 * exists per distinct nr_ids (namespace nesting depth); they are kept on
 * pid_caches_lh and shared by all namespaces at the same depth.
 */
struct pid_cache {
	int nr_ids;			/* number of numerical ids a pid from this cache carries */
	char name[16];			/* slab name, formatted as "pid_%d" */
	struct kmem_cache *cachep;	/* the slab cache itself */
	struct list_head list;		/* link on pid_caches_lh */
};

static LIST_HEAD(pid_caches_lh);		/* all pid_cache descriptors */
static DEFINE_MUTEX(pid_caches_mutex);		/* protects pid_caches_lh */
static struct kmem_cache *pid_ns_cachep;	/* slab for struct pid_namespace */
  25. /*
  26. * creates the kmem cache to allocate pids from.
  27. * @nr_ids: the number of numerical ids this pid will have to carry
  28. */
  29. static struct kmem_cache *create_pid_cachep(int nr_ids)
  30. {
  31. struct pid_cache *pcache;
  32. struct kmem_cache *cachep;
  33. mutex_lock(&pid_caches_mutex);
  34. list_for_each_entry(pcache, &pid_caches_lh, list)
  35. if (pcache->nr_ids == nr_ids)
  36. goto out;
  37. pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
  38. if (pcache == NULL)
  39. goto err_alloc;
  40. snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
  41. cachep = kmem_cache_create(pcache->name,
  42. sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
  43. 0, SLAB_HWCACHE_ALIGN, NULL);
  44. if (cachep == NULL)
  45. goto err_cachep;
  46. pcache->nr_ids = nr_ids;
  47. pcache->cachep = cachep;
  48. list_add(&pcache->list, &pid_caches_lh);
  49. out:
  50. mutex_unlock(&pid_caches_mutex);
  51. return pcache->cachep;
  52. err_cachep:
  53. kfree(pcache);
  54. err_alloc:
  55. mutex_unlock(&pid_caches_mutex);
  56. return NULL;
  57. }
  58. static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
  59. {
  60. struct pid_namespace *ns;
  61. unsigned int level = parent_pid_ns->level + 1;
  62. int i;
  63. ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
  64. if (ns == NULL)
  65. goto out;
  66. ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
  67. if (!ns->pidmap[0].page)
  68. goto out_free;
  69. ns->pid_cachep = create_pid_cachep(level + 1);
  70. if (ns->pid_cachep == NULL)
  71. goto out_free_map;
  72. kref_init(&ns->kref);
  73. ns->level = level;
  74. ns->parent = get_pid_ns(parent_pid_ns);
  75. set_bit(0, ns->pidmap[0].page);
  76. atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
  77. for (i = 1; i < PIDMAP_ENTRIES; i++)
  78. atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
  79. return ns;
  80. out_free_map:
  81. kfree(ns->pidmap[0].page);
  82. out_free:
  83. kmem_cache_free(pid_ns_cachep, ns);
  84. out:
  85. return ERR_PTR(-ENOMEM);
  86. }
  87. static void destroy_pid_namespace(struct pid_namespace *ns)
  88. {
  89. int i;
  90. for (i = 0; i < PIDMAP_ENTRIES; i++)
  91. kfree(ns->pidmap[i].page);
  92. kmem_cache_free(pid_ns_cachep, ns);
  93. }
  94. struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
  95. {
  96. struct pid_namespace *new_ns;
  97. BUG_ON(!old_ns);
  98. new_ns = get_pid_ns(old_ns);
  99. if (!(flags & CLONE_NEWPID))
  100. goto out;
  101. new_ns = ERR_PTR(-EINVAL);
  102. if (flags & CLONE_THREAD)
  103. goto out_put;
  104. new_ns = create_pid_namespace(old_ns);
  105. out_put:
  106. put_pid_ns(old_ns);
  107. out:
  108. return new_ns;
  109. }
  110. void free_pid_ns(struct kref *kref)
  111. {
  112. struct pid_namespace *ns, *parent;
  113. ns = container_of(kref, struct pid_namespace, kref);
  114. parent = ns->parent;
  115. destroy_pid_namespace(ns);
  116. if (parent != NULL)
  117. put_pid_ns(parent);
  118. }
/*
 * Kill and reap every remaining task in @pid_ns.  Runs when the
 * namespace's init (the last thread of the cgroup-init thread group)
 * is terminating.
 */
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those
	 * that belong to the same thread group.  To avoid that, we would
	 * have to walk the entire tasklist looking for processes in this
	 * namespace, but that could be unnecessarily expensive if the
	 * pid namespace has just a few processes.  Or we would need to
	 * maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	/* Presumably scans for the first in-use pid after 1 (our own init)
	 * - confirm against next_pidmap(). */
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();
		/*
		 * Use force_sig() since it clears SIGNAL_UNKILLABLE, ensuring
		 * any nested-container's init processes don't ignore the
		 * signal.
		 */
		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task)
			force_sig(SIGKILL, task);
		rcu_read_unlock();
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap children until none remain; clear TIF_SIGPENDING each pass
	 * so a pending signal cannot make the wait bail out early.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}
/*
 * Boot-time setup: create the slab cache backing struct pid_namespace
 * allocations.  SLAB_PANIC makes an allocation failure here fatal.
 */
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}
__initcall(pid_namespaces_init);