/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
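
/*
 * Worked example (editorial sketch, assuming UIDHASH_BITS == 8 so that
 * UIDHASH_SZ == 256 and UIDHASH_MASK == 255; the real values are set
 * where uidhash_table is defined):
 *
 *	__uidhashfn(1000) == ((1000 >> 8) + 1000) & 255 == 1003 & 255 == 235
 *
 * Folding the high bits into the low ones means UIDs that are equal
 * modulo UIDHASH_SZ (e.g. 0, 256, 512) still land in different buckets:
 * they hash to 0, 1 and 2 respectively.
 */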

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}
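
/*
 * Look up a uid on the given hash chain.  On a hit the user_struct's
 * reference count is raised, so each successful lookup must later be
 * balanced by a call to free_uid().
 */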
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}

/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	sscanf(buffer, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
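
/*
 * Userspace view (editorial sketch): the per-uid CPU weight is read and
 * written through the sysfs file created below, e.g.
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * (uid 1000 and the value 2048 are illustrative only, not defaults
 * defined in this file.)
 */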

static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 *	"/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error) {
		/* don't announce a kobject we just tore down again */
		kobject_del(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}

/* create these in sysfs filesystem:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 *
 * The actual teardown is deferred to a workqueue because sysfs and
 * kobject removal may sleep, which is not allowed in this context.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the reference dropped in free_uid(); the work function
	 * will drop it again via atomic_dec_and_lock() */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
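
/*
 * Typical caller pattern (editorial sketch, not code from this file):
 * every reference obtained through find_user() or alloc_uid() is dropped
 * with free_uid(), which tears the user_struct down on the final put:
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... inspect u->processes, u->sigpending etc. ...
 *		free_uid(u);
 *	}
 */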

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;
}
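
/*
 * Switch the per-user accounting for "current" from its old user to
 * new_user: move the process count, switch the keyrings and (with
 * CONFIG_FAIR_USER_SCHED) the scheduler task group, then drop the
 * reference on the old user_struct.
 */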
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains so that the user_struct-s will still be
	 * alive, but no longer hashed.  Subsequent free_uid() calls
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
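
/*
 * Boot-time setup: create the slab cache for user_struct, initialise
 * the hash buckets of the initial user namespace, and hash the
 * statically allocated root_user.
 */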
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);