user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e., setuid() and friends).
 */
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

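/*
 * Illustrative only: UIDHASH_SZ and UIDHASH_BITS come from <linux/sched.h>;
 * assuming the usual UIDHASH_BITS of 7 (so UIDHASH_SZ == 128), uid 1000
 * hashes to ((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111. Folding the
 * high bits into the low bits spreads runs of consecutive UIDs across the
 * table.
 */
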
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

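/*
 * On a hit, a reference is taken on the user_struct before it is returned,
 * so the entry cannot be freed out from under the caller.
 */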
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

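/*
 * Illustrative only (not part of the original source): with both
 * CONFIG_USER_SCHED and CONFIG_SYSFS enabled, the per-user share can be
 * read and tuned from userspace, e.g.:
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * (uid 1000 and the value 1024 are made-up example values.)
 */
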
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
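	/*
	 * Even on failure, kobject_init_and_add() leaves the kobject
	 * initialized with a reference held, so the error path must drop
	 * it with kobject_put().
	 */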
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/*
 * create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/*
 * work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/*
	 * Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/*
 * IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
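/*
 * The sysfs teardown cannot be done from this context, so the reference
 * that atomic_dec_and_lock() in free_uid() just dropped is re-taken and
 * the real cleanup is deferred to remove_user_sysfs_dir() via
 * schedule_work(); that work function performs the final
 * atomic_dec_and_lock() itself.
 */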
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the reference count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/*
 * IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

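/*
 * Illustrative only: the usual find_user()/free_uid() pairing looks like
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... inspect or update *u while the reference is held ...
 *		free_uid(u);
 *	}
 */
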
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

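/*
 * Find the user_struct for the given uid in the given namespace, creating
 * and hashing a new one if none exists yet. The returned structure carries
 * a reference that the caller must eventually drop with free_uid().
 */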
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/*
	 * Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/*
			 * This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

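/*
 * Account the calling task against new_user instead of its current user,
 * switch its keyring and task group accordingly, and drop the old user's
 * reference. Called when a task changes credentials (setuid() and friends).
 */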
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/*
	 * What if a process setreuid()'s and this brings the
	 * new uid over its NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);

	/*
	 * Collapse the chains so that the user_structs stay alive,
	 * but are no longer hashed; a subsequent free_uid() will
	 * free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}

	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif

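/*
 * Boot-time setup: create the slab cache for user_struct, initialize the
 * hash table of the initial user namespace, and hash root_user into it.
 */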
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);