user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

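/*
 * The interface exported by this file: alloc_uid() looks up or creates the
 * user_struct for a uid and returns it with a reference held, find_user()
 * looks one up without creating it, free_uid() drops a reference (freeing
 * the structure when the last one goes away), and switch_uid() moves the
 * current task's accounting to a different user_struct.
 */
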
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
#define UIDHASH_BITS		(CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))

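/*
 * __uidhashfn() folds the high bits of the uid onto the low bits and masks
 * the result to UIDHASH_SZ buckets (8 with CONFIG_BASE_SMALL, 256 otherwise),
 * so uids that differ only in their upper bits still spread across the table.
 */
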
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

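/*
 * root_user is statically initialized with one reference and one process
 * (the init task, which already runs as root) and is inserted into the hash
 * table by uid_cache_init() below rather than via alloc_uid().
 */
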
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

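/*
 * Walk one hash bucket looking for the given uid. On a hit the reference
 * count is bumped before the user_struct is returned, so the caller always
 * owns a reference that must eventually be dropped with free_uid().
 */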
static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}
	return NULL;
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;

	spin_lock_bh(&uidhash_lock);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_bh(&uidhash_lock);
	return ret;
}

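/*
 * A minimal caller sketch (hypothetical, for illustration only): look a uid
 * up, read its per-user counters, then drop the reference that find_user()
 * took on our behalf:
 *
 *	struct user_struct *user = find_user(some_uid);
 *	if (user) {
 *		printk("uid %u has %d processes\n",
 *		       user->uid, atomic_read(&user->processes));
 *		free_uid(user);
 *	}
 */
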
void free_uid(struct user_struct *up)
{
	/*
	 * atomic_dec_and_lock() only takes uidhash_lock when the refcount
	 * is about to hit zero; bottom halves are disabled around it so the
	 * lock is always acquired softirq-safely (see the comment above
	 * uidhash_lock).
	 */
	local_bh_disable();
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
	local_bh_enable();
}

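/*
 * Look up the user_struct for a uid, creating it if it does not exist yet.
 * The lookup is done under the spinlock, but the allocation is not: the
 * lock is dropped before kmem_cache_alloc() (which may sleep with
 * SLAB_KERNEL) and retaken afterwards, so a second lookup is needed to
 * catch the case where another task inserted the same uid in the meantime.
 */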
struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

	spin_lock_bh(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_bh(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_bh(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_bh(&uidhash_lock);
	}
	return up;
}

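/*
 * Move the calling task from its old user_struct to new_user: the process
 * counts are transferred, the task's keyrings are switched, and the
 * reference on the old user_struct is dropped.
 */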
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	free_uid(old_user);
	suid_keys(current);
}

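/*
 * Boot-time setup: create the slab cache for user_struct (SLAB_PANIC makes
 * the kernel panic if that fails), initialise every hash bucket, and hash
 * in the statically allocated root_user.
 */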
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_bh(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock_bh(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);