user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
        .kref = {
                .refcount = ATOMIC_INIT(1),
        },
        .creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))
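
/*
 * Worked example (illustrative comment, not part of the original file;
 * assumes UIDHASH_BITS == 7, i.e. UIDHASH_SZ == 128): for uid 1000,
 *
 *      __uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *                        == (7 + 1000) & 127
 *                        == 111
 *
 * so uid 1000 lands in bucket 111 of the namespace's uidhash_table.
 * Folding the high bits into the index keeps uids that differ by a
 * multiple of UIDHASH_SZ (e.g. 1000 and 1128) from always colliding,
 * which a plain "uid & UIDHASH_MASK" would do.
 */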

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
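
/*
 * Sketch of the resulting locking pattern (illustrative comment, not part
 * of the original file): since the lock can be taken from softirq context
 * while some callers already run with interrupts disabled, takers disable
 * interrupts (spin_lock_irq()/spin_lock_irqsave()) rather than using
 * spin_lock_bh():
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&uidhash_lock, flags);
 *      ... walk or modify the uid hash ...
 *      spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * find_user() below uses exactly this form; alloc_uid() uses the
 * spin_lock_irq() form because it always runs in process context.
 */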

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
        .__count        = ATOMIC_INIT(2),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
        .user_ns        = &init_user_ns,
#ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group(&root_task_group);
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

#else   /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif  /* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED

static ssize_t cpu_shares_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long shares;
        int rc;

        /* reject malformed input instead of using an uninitialized value */
        if (sscanf(buf, "%lu", &shares) != 1)
                return -EINVAL;

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
        __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        long rt_runtime;        /* signed, to match the "%ld" scan below */
        int rc;

        /* reject malformed input instead of using an uninitialized value */
        if (sscanf(buf, "%ld", &rt_runtime) != 1)
                return -EINVAL;

        rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
        __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_period;
        int rc;

        /* reject malformed input instead of using an uninitialized value */
        if (sscanf(buf, "%lu", &rt_period) != 1)
                return -EINVAL;

        rc = sched_group_set_rt_period(up->tg, rt_period);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
        __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        &cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        &cpu_rt_runtime_attr.attr,
        &cpu_rt_period_attr.attr,
#endif
        NULL
};
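
/*
 * Example of the resulting userspace interface (illustrative comment, not
 * part of the original file; the exact set of files depends on the
 * CONFIG_*_GROUP_SCHED options above, and the values shown are made up):
 *
 *      $ ls /sys/kernel/uids/1000
 *      cpu_share  cpu_rt_period  cpu_rt_runtime
 *      $ cat /sys/kernel/uids/1000/cpu_share
 *      1024
 *      # echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * Writing a file ends up in the matching _store() handler above.
 */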

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
        return;
}

static struct kobj_type uids_ktype = {
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = uids_attributes,
        .release        = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
        struct kobject *kobj = &up->kobj;
        int error;

        memset(kobj, 0, sizeof(struct kobject));
        kobj->kset = uids_kset;
        error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
        if (error) {
                kobject_put(kobj);
                goto done;
        }

        kobject_uevent(kobj, KOBJ_ADD);
done:
        return error;
}

/* create these entries in sysfs:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
        uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
        if (!uids_kset)
                return -ENOMEM;

        return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        kobject_uevent(&up->kobj, KOBJ_REMOVE);
        kobject_del(&up->kobj);
        kobject_put(&up->kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
        /* Restore the count: removing the sysfs directory needs uids_mutex
         * and may sleep, so the final teardown is deferred to process
         * context, where remove_user_sysfs_dir() re-runs the
         * atomic_dec_and_lock().
         */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

        put_user_ns(up->user_ns);
        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else   /* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        put_user_ns(up->user_ns);
        kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current_user()->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
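
/*
 * Example usage (hypothetical caller, not part of this file): every
 * successful find_user() must be balanced with a free_uid() to drop the
 * reference taken above:
 *
 *      struct user_struct *user = find_user(uid);
 *
 *      if (user) {
 *              ... read user->processes, user->files, etc. ...
 *              free_uid(user);
 *      }
 */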

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;

        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                if (sched_create_user(new) < 0)
                        goto out_free_user;

                new->user_ns = get_user_ns(ns);

                if (uids_user_create(new))
                        goto out_destroy_sched;

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;

out_destroy_sched:
        sched_destroy_user(new);
        put_user_ns(new->user_ns);
out_free_user:
        kmem_cache_free(uid_cachep, new);
out_unlock:
        uids_mutex_unlock();
        return NULL;
}
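
/*
 * Example usage (sketch of what a setuid()-style caller might do; the
 * variable names here are illustrative, not part of this file): alloc_uid()
 * returns the user_struct with a reference held, whether it found an
 * existing entry or inserted a new one:
 *
 *      struct user_struct *new_user = alloc_uid(ns, uid);
 *
 *      if (!new_user)
 *              return -EAGAIN;
 *      ... move per-user accounting over to new_user ...
 *      free_uid(old_user);
 */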

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);