user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))
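
/*
 * Worked example (assuming the common UIDHASH_BITS == 8 configuration,
 * so UIDHASH_SZ == 256): uid 1000 hashes to
 * ((1000 >> 8) + 1000) & 255 == (3 + 1000) & 255 == 235,
 * i.e. bucket 235 of the per-namespace uidhash_table.
 */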

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
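
/*
 * Consequence of the above: every hash-table access below takes the lock
 * with an irq-disabling form (spin_lock_irq / spin_lock_irqsave), and
 * free_uid() pairs local_irq_save() with atomic_dec_and_lock() so the
 * lock is only touched when the last reference is dropped.
 */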

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .mq_bytes       = 0,
        .locked_shm     = 0,
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}
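
/*
 * Note that a successful uid_hash_find() bumps __count, so every hit
 * hands back a counted reference that must eventually be dropped with
 * free_uid().
 */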

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
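
/*
 * A minimal caller sketch (illustrative only, not taken from this file):
 *
 *      struct user_struct *u = find_user(uid);
 *      if (u) {
 *              ...use the counted reference...
 *              free_uid(u);    (drops the reference taken by find_user)
 *      }
 */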

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                spin_unlock_irqrestore(&uidhash_lock, flags);
                key_put(up->uid_keyring);
                key_put(up->session_keyring);
                kmem_cache_free(uid_cachep, up);
        } else {
                local_irq_restore(flags);
        }
}
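
/*
 * alloc_uid() below uses the usual optimistic pattern: look the uid up
 * without allocating, allocate and initialise a new user_struct outside
 * the lock if the lookup missed, then re-check under the lock in case
 * another task inserted the same uid in the meantime; the loser throws
 * its copy away.
 */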
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
                atomic_set(&new->inotify_watches, 0);
                atomic_set(&new->inotify_devs, 0);
#endif
                new->mq_bytes = 0;
                new->locked_shm = 0;

                if (alloc_uid_keyring(new, current) < 0) {
                        kmem_cache_free(uid_cachep, new);
                        return NULL;
                }

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }
        return up;
}
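
/*
 * A sketch of how a setuid-style path might use alloc_uid() together with
 * switch_uid() below (illustrative only, not copied from kernel/sys.c):
 *
 *      struct user_struct *new_user;
 *
 *      new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
 *      if (!new_user)
 *              return -EAGAIN;
 *      switch_uid(new_user);
 */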

void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user).. If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}
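
/*
 * Roughly speaking, the smp_mb() above orders the store of the new
 * current->user value before we sample siglock, and spin_unlock_wait()
 * then waits for any current holder of siglock (e.g. a concurrent
 * __sigqueue_alloc() that may have picked up the old pointer) to leave
 * its critical region before the old user_struct can be freed.
 */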

void release_uids(struct user_namespace *ns)
{
        int i;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *nd;

        spin_lock_irqsave(&uidhash_lock, flags);
        /*
         * collapse the chains so that the user_struct-s will
         * be still alive, but not in hashes. subsequent free_uid()
         * will free them.
         */
        for (i = 0; i < UIDHASH_SZ; i++) {
                head = ns->uidhash_table + i;
                while (!hlist_empty(head)) {
                        nd = head->first;
                        hlist_del_init(nd);
                }
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);