@@ -20,9 +20,9 @@
 
 struct user_namespace init_user_ns = {
         .kref = {
-                .refcount = ATOMIC_INIT(2),
+                .refcount = ATOMIC_INIT(1),
         },
-        .root_user = &root_user,
+        .creator = &root_user,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
@@ -48,12 +48,14 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-        .__count = ATOMIC_INIT(1),
+        .__count = ATOMIC_INIT(2),
         .processes = ATOMIC_INIT(1),
         .files = ATOMIC_INIT(0),
         .sigpending = ATOMIC_INIT(0),
         .locked_shm = 0,
+        .user_ns = &init_user_ns,
 #ifdef CONFIG_USER_SCHED
         .tg = &init_task_group,
 #endif
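
The two hunks above set up a deliberate reference cycle at boot:
init_user_ns pins root_user through .creator, and root_user pins
init_user_ns through .user_ns, which is exactly what the new initial
counts encode. Below is a minimal userspace model of that bootstrap;
the names (root_user_model, init_ns_model) are invented for
illustration and this is not kernel code:

#include <assert.h>

struct user_ns;

struct user {
        int count;              /* models root_user.__count */
        struct user_ns *ns;     /* models root_user.user_ns */
};

struct user_ns {
        int refcount;           /* models init_user_ns.kref.refcount */
        struct user *creator;   /* models init_user_ns.creator */
};

int main(void)
{
        static struct user root_user_model;
        static struct user_ns init_ns_model;

        /* The namespace starts at 1: the only reference is the one
         * root_user's .user_ns holds on it. */
        init_ns_model.refcount = 1;
        init_ns_model.creator = &root_user_model;

        /* The user starts at 2: one for the init task's credentials,
         * one for init_user_ns->creator. */
        root_user_model.count = 2;
        root_user_model.ns = &init_ns_model;

        assert(init_ns_model.creator->ns == &init_ns_model);
        return 0;
}
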
@@ -314,12 +316,13 @@ done:
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
         /* restore back the count */
         atomic_inc(&up->__count);
         spin_unlock_irqrestore(&uidhash_lock, flags);
 
+        put_user_ns(up->user_ns);
         INIT_WORK(&up->work, remove_user_sysfs_dir);
         schedule_work(&up->work);
 }
@@ -335,13 +338,14 @@ static inline void uids_mutex_unlock(void) { }
  * IRQ state (as stored in flags) is restored and uidhash_lock released
  * upon function exit.
  */
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
 {
         uid_hash_remove(up);
         spin_unlock_irqrestore(&uidhash_lock, flags);
         sched_destroy_user(up);
         key_put(up->uid_keyring);
         key_put(up->session_keyring);
+        put_user_ns(up->user_ns);
         kmem_cache_free(uid_cachep, up);
 }
 
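
Both build variants of free_user(), the one above that defers sysfs
teardown to a workqueue and this plain one, now drop the reference the
dying user_struct holds on its namespace, pairing with the get taken in
alloc_uid() below. A comment-form sketch of the rule (a reading of the
patch, not kernel text):

/*
 * One get, one put per user_struct:
 *
 *   alloc_uid(ns, uid):   new->user_ns = get_user_ns(ns);   ns count +1
 *   free_user(up, flags): put_user_ns(up->user_ns);         ns count -1
 *
 * A user namespace therefore cannot be freed while any user_struct
 * created in it is still alive.
 */
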
@@ -357,7 +361,7 @@ struct user_struct *find_user(uid_t uid)
 {
         struct user_struct *ret;
         unsigned long flags;
-        struct user_namespace *ns = current->nsproxy->user_ns;
+        struct user_namespace *ns = current_user()->user_ns;
 
         spin_lock_irqsave(&uidhash_lock, flags);
         ret = uid_hash_find(uid, uidhashentry(ns, uid));
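
This hunk reflects the series moving the user namespace off the task's
nsproxy and onto the user_struct reachable from its credentials, so a
cred by itself is enough to resolve the namespace. Assuming the
cred-based current_user() helper of this kernel series, the two lookup
chains compare as follows (illustrative comment, not a diff hunk):

/*
 * Old chain:  current->nsproxy->user_ns
 * New chain:  current_user()->user_ns
 *
 * where current_user() resolves to the current task's cred->user,
 * and user_struct now carries the user_ns pointer added above.
 */
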
@@ -404,6 +408,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
         if (sched_create_user(new) < 0)
                 goto out_free_user;
 
+        new->user_ns = get_user_ns(ns);
+
         if (uids_user_create(new))
                 goto out_destoy_sched;
 
@@ -427,7 +433,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
                         up = new;
                 }
                 spin_unlock_irq(&uidhash_lock);
-
         }
 
         uids_mutex_unlock();
@@ -436,6 +441,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
         sched_destroy_user(new);
+        put_user_ns(new->user_ns);
 out_free_user:
         kmem_cache_free(uid_cachep, new);
 out_unlock:
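
The namespace reference is taken only after sched_create_user()
succeeds, so the matching put belongs under out_destoy_sched (the
label's misspelling is the kernel's own) rather than out_free_user:
each error label undoes exactly what was acquired before the jump. A
comment-form walkthrough of the unwinding order (simplified control
flow, not the real function body):

/*
 *   sched_create_user(new)           may fail -> goto out_free_user
 *   new->user_ns = get_user_ns(ns)   cannot fail
 *   uids_user_create(new)            may fail -> goto out_destoy_sched
 *
 * out_destoy_sched:   releases the scheduler state and the ns ref:
 *   sched_destroy_user(new);
 *   put_user_ns(new->user_ns);
 * out_free_user:      falls through from above:
 *   kmem_cache_free(uid_cachep, new);
 */
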
@@ -443,33 +449,6 @@ out_unlock:
         return NULL;
 }
 
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
-        int i;
-        unsigned long flags;
-        struct hlist_head *head;
-        struct hlist_node *nd;
-
-        spin_lock_irqsave(&uidhash_lock, flags);
-        /*
-         * collapse the chains so that the user_struct-s will
-         * be still alive, but not in hashes. subsequent free_uid()
-         * will free them.
-         */
-        for (i = 0; i < UIDHASH_SZ; i++) {
-                head = ns->uidhash_table + i;
-                while (!hlist_empty(head)) {
-                        nd = head->first;
-                        hlist_del_init(nd);
-                }
-        }
-        spin_unlock_irqrestore(&uidhash_lock, flags);
-
-        free_uid(ns->root_user);
-}
-#endif
-
 static int __init uid_cache_init(void)
 {
         int n;
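
release_uids() existed to empty a namespace's uid hash when the
namespace went away. Under the refcounting rules this patch introduces
it becomes dead code: every hashed user_struct now pins its namespace
through user_ns, so the namespace's count cannot reach zero while any
of its user_structs is still hashed, and by the time the namespace is
freed there is nothing left to unhash. A comment-form statement of the
invariant (this is a reading of the patch, not text from it):

/*
 * For every user_struct u in ns->uidhash_table: u->user_ns == ns,
 * and u holds one reference on ns, taken in alloc_uid().
 *
 *   => refcount(ns) >= number of user_structs hashed in ns
 *   => refcount(ns) == 0 implies the hash is already empty,
 *      so collapsing the chains is no longer needed.
 */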