pid_namespace.c

/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>

#define BITS_PER_PAGE        (PAGE_SIZE * 8)

struct pid_cache {
        int nr_ids;
        char name[16];
        struct kmem_cache *cachep;
        struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
        struct pid_cache *pcache;
        struct kmem_cache *cachep;

        mutex_lock(&pid_caches_mutex);
        list_for_each_entry(pcache, &pid_caches_lh, list)
                if (pcache->nr_ids == nr_ids)
                        goto out;

        pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
        if (pcache == NULL)
                goto err_alloc;

        snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
        cachep = kmem_cache_create(pcache->name,
                        sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (cachep == NULL)
                goto err_cachep;

        pcache->nr_ids = nr_ids;
        pcache->cachep = cachep;
        list_add(&pcache->list, &pid_caches_lh);
out:
        mutex_unlock(&pid_caches_mutex);
        return pcache->cachep;

err_cachep:
        kfree(pcache);
err_alloc:
        mutex_unlock(&pid_caches_mutex);
        return NULL;
}
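
/*
 * Illustrative sketch of the intended consumer: pid allocation in
 * kernel/pid.c draws from this per-level cache, roughly along the lines of
 * (sketch only, the exact call site may differ):
 *
 *        struct pid *pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
 *
 * A namespace at level N therefore allocates objects big enough for N + 1
 * struct upid entries: one numerical id for the namespace itself plus one
 * for every ancestor namespace the pid is visible in.
 */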

static struct pid_namespace *create_pid_namespace(unsigned int level)
{
        struct pid_namespace *ns;
        int i;

        ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;

        ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ns->pidmap[0].page)
                goto out_free;

        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
                goto out_free_map;

        kref_init(&ns->kref);
        ns->last_pid = 0;
        ns->child_reaper = NULL;
        ns->level = level;

        /* pid 0 is never handed out; the first pid allocated here will be 1 */
        set_bit(0, ns->pidmap[0].page);
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

        /* the remaining pidmap pages are allocated lazily, on first use */
        for (i = 1; i < PIDMAP_ENTRIES; i++) {
                ns->pidmap[i].page = NULL;
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
        }

        return ns;

out_free_map:
        kfree(ns->pidmap[0].page);
out_free:
        kmem_cache_free(pid_ns_cachep, ns);
out:
        return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
        int i;

        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
        kmem_cache_free(pid_ns_cachep, ns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
        struct pid_namespace *new_ns;

        BUG_ON(!old_ns);
        new_ns = get_pid_ns(old_ns);
        if (!(flags & CLONE_NEWPID))
                goto out;

        /* a thread cannot live in a different pid namespace than its group leader */
        new_ns = ERR_PTR(-EINVAL);
        if (flags & CLONE_THREAD)
                goto out_put;

        new_ns = create_pid_namespace(old_ns->level + 1);
        if (!IS_ERR(new_ns))
                new_ns->parent = get_pid_ns(old_ns);

out_put:
        put_pid_ns(old_ns);
out:
        return new_ns;
}
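
/*
 * Rough sketch of the expected caller: create_new_namespaces() in
 * kernel/nsproxy.c passes the clone flags and the parent's namespace in,
 * roughly like this (illustration only, details may differ from the real
 * call site):
 *
 *        new_nsp->pid_ns = copy_pid_ns(flags, tsk->nsproxy->pid_ns);
 *        if (IS_ERR(new_nsp->pid_ns))
 *                return PTR_ERR(new_nsp->pid_ns);
 *
 * Without CLONE_NEWPID this just takes another reference on the old
 * namespace; with it, the child ends up one level deeper in the hierarchy.
 */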

void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns, *parent;

        ns = container_of(kref, struct pid_namespace, kref);

        parent = ns->parent;
        destroy_pid_namespace(ns);

        if (parent != NULL)
                put_pid_ns(parent);
}
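
/*
 * free_pid_ns() is the kref release callback; the matching put side is a
 * small inline in <linux/pid_namespace.h>, along these lines (sketch only):
 *
 *        static inline void put_pid_ns(struct pid_namespace *ns)
 *        {
 *                if (ns != &init_pid_ns)
 *                        kref_put(&ns->kref, free_pid_ns);
 *        }
 *
 * Dropping the parent reference above is what lets an unused chain of
 * nested namespaces unwind one level at a time.
 */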

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;

        /*
         * The last thread in the namespace's init thread group is terminating.
         * Find the remaining pids in the namespace, signal them and wait for
         * them to exit.
         *
         * Note: this signals each thread in the namespace - even those that
         *       belong to the same thread group. To avoid that, we would have
         *       to walk the entire tasklist looking for processes in this
         *       namespace, but that could be unnecessarily expensive if the
         *       pid namespace has just a few processes, or we would need to
         *       maintain a tasklist for each pid namespace.
         */
        read_lock(&tasklist_lock);
        nr = next_pidmap(pid_ns, 1);
        while (nr > 0) {
                kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
                nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);

        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        /* The child reaper for this pid namespace is going away */
        pid_ns->child_reaper = NULL;
}
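
/*
 * Userspace-visible effect, sketched under the assumption of a kernel with
 * CLONE_NEWPID support and a caller holding CAP_SYS_ADMIN (illustration
 * only, error handling omitted):
 *
 *        pid_t child = syscall(__NR_clone, CLONE_NEWPID | SIGCHLD, NULL);
 *        if (child == 0) {
 *                // getpid() returns 1 here: this task is the namespace's
 *                // child_reaper, and its exit triggers zap_pid_ns_processes()
 *                execl("/bin/sh", "sh", (char *)NULL);
 *        }
 *        waitpid(child, NULL, 0);
 *
 * Once the namespace's init task dies, every remaining pid in the namespace
 * is sent SIGKILL and reaped by the loop above.
 */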

static __init int pid_namespaces_init(void)
{
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
        return 0;
}

__initcall(pid_namespaces_init);