pid_namespace.c

/*
 * Pid namespaces
 *
 * Authors:
 *	(C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *	(C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>

#define BITS_PER_PAGE		(PAGE_SIZE*8)
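
/*
 * A struct pid carries one struct upid per namespace level, so a pid
 * allocated at nesting depth N needs N+1 upid slots.  One pid_cache
 * (wrapping a suitably sized kmem_cache) is kept per distinct depth
 * and shared by all pid namespaces at that depth; pid_caches_lh below
 * tracks the caches created so far.
 */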
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
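
/*
 * Allocate a new pid namespace one level below @parent_pid_ns.  The
 * first pidmap page is allocated eagerly and bit 0 is set so that pid 0
 * is never handed out; the remaining pidmap pages stay NULL here and
 * are populated on demand by the pid allocator in kernel/pid.c.
 */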
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i;

	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(-ENOMEM);
}
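
/*
 * Tear down a dead namespace: release every pidmap page (entries never
 * allocated are still NULL, which kfree() tolerates) and return the
 * namespace itself to its cache.
 */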
static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kmem_cache_free(pid_ns_cachep, ns);
}
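
/*
 * Called on clone()/unshare().  Without CLONE_NEWPID the caller simply
 * shares @old_ns (with an extra reference taken); CLONE_NEWPID combined
 * with CLONE_THREAD is rejected, since all threads of one thread group
 * must live in the same pid namespace.
 */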
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (flags & CLONE_THREAD)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(old_ns);
}
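
/*
 * kref release callback, invoked via put_pid_ns() when the last
 * reference to a namespace is dropped.  Destroying a child also drops
 * the reference it held on its parent, so an otherwise unused chain of
 * nested namespaces unwinds one level per call.
 */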
void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}
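
/*
 * Reached from the exit path when the namespace's init task (the
 * "cgroup-init" in the comment below) is terminating; the caller is
 * expected to be that init task itself, since the sys_wait4() loop at
 * the end reaps the caller's remaining children.
 */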
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace, even those that
	 *	 belong to the same thread group.  To avoid that, we would
	 *	 have to walk the entire tasklist looking for processes in
	 *	 this namespace, which could be unnecessarily expensive if
	 *	 the pid namespace has just a few processes.  Alternatively,
	 *	 we would need to maintain a tasklist for each pid
	 *	 namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();
		/*
		 * Use force_sig() since it clears SIGNAL_UNKILLABLE,
		 * ensuring any nested container's init processes don't
		 * ignore the signal.
		 */
		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task)
			force_sig(SIGKILL, task);
		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap children until none are left.  sys_wait4() can be cut
	 * short by the SIGKILL delivered above, so clear TIF_SIGPENDING
	 * before each attempt.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}

__initcall(pid_namespaces_init);
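
/*
 * Usage sketch (userspace, illustrative only; child_fn and STACK_SZ are
 * made-up names, and CLONE_NEWPID requires CAP_SYS_ADMIN): a new pid
 * namespace is created by passing CLONE_NEWPID to clone(2), and the
 * first task in it sees itself as pid 1:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	static int child_fn(void *arg)
 *	{
 *		return 0;	// getpid() here returns 1
 *	}
 *
 *	static char stack[STACK_SZ];
 *	pid_t pid = clone(child_fn, stack + STACK_SZ,
 *			  CLONE_NEWPID | SIGCHLD, NULL);
 */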