@@ -71,6 +71,7 @@ struct pid_namespace init_pid_ns = {
 	.level = 0,
 	.child_reaper = &init_task,
 };
+EXPORT_SYMBOL_GPL(init_pid_ns);
 
 int is_global_init(struct task_struct *tsk)
 {
@@ -210,7 +211,8 @@ fastcall void free_pid(struct pid *pid)
 	unsigned long flags;
 
 	spin_lock_irqsave(&pidmap_lock, flags);
-	hlist_del_rcu(&pid->pid_chain);
+	for (i = 0; i <= pid->level; i++)
+		hlist_del_rcu(&pid->numbers[i].pid_chain);
 	spin_unlock_irqrestore(&pidmap_lock, flags);
 
 	for (i = 0; i <= pid->level; i++)
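For reference, the hunk above relies on the multi-level pid layout introduced earlier in this series: every struct pid carries one struct upid per namespace level, and it is those upids that sit on the hash chains, so freeing a pid has to unhash each level. A simplified sketch of the two structures (unrelated members and comments trimmed):

struct upid {
	int nr;				/* pid value as seen in one namespace */
	struct pid_namespace *ns;	/* the namespace that nr belongs to */
	struct hlist_node pid_chain;	/* link into the global pid_hash[] */
};

struct pid {
	atomic_t count;
	unsigned int level;		/* depth of the creating namespace */
	struct hlist_head tasks[PIDTYPE_MAX];
	struct rcu_head rcu;
	struct upid numbers[1];		/* really level + 1 entries */
};

numbers[0] is always the id in the initial namespace; numbers[pid->level] is the id in the namespace the task was created in.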
@@ -225,6 +227,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 	enum pid_type type;
 	int i, nr;
 	struct pid_namespace *tmp;
+	struct upid *upid;
 
 	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
 	if (!pid)
@@ -251,7 +254,11 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 		INIT_HLIST_HEAD(&pid->tasks[type]);
 
 	spin_lock_irq(&pidmap_lock);
-	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr, ns)]);
+	for (i = ns->level; i >= 0; i--) {
+		upid = &pid->numbers[i];
+		hlist_add_head_rcu(&upid->pid_chain,
+				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
+	}
 	spin_unlock_irq(&pidmap_lock);
 
 out:
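Each upid is hashed under its own (nr, ns) pair, so the same numeric id can exist in several namespaces without the chains mixing them up. The series does this by folding the namespace pointer into the hash; roughly:

/* Sketch of the hash helper used above: mixes the numeric id with the
 * namespace pointer so equal nrs from different namespaces usually
 * land in different buckets.  pidhash_shift sizes pid_hash[]. */
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)

The loop direction (ns->level down to 0) only affects insertion order; every level ends up on a chain either way.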
@@ -266,19 +273,20 @@ out_free:
 	goto out;
 }
 
-struct pid * fastcall find_pid(int nr)
+struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
 {
 	struct hlist_node *elem;
-	struct pid *pid;
+	struct upid *pnr;
+
+	hlist_for_each_entry_rcu(pnr, elem,
+			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
+		if (pnr->nr == nr && pnr->ns == ns)
+			return container_of(pnr, struct pid,
+					numbers[ns->level]);
 
-	hlist_for_each_entry_rcu(pid, elem,
-			&pid_hash[pid_hashfn(nr, &init_pid_ns)], pid_chain) {
-		if (pid->nr == nr)
-			return pid;
-	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(find_pid);
+EXPORT_SYMBOL_GPL(find_pid_ns);
 
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
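The buckets now chain struct upid rather than struct pid, so the lookup matches on both the number and the namespace and then converts the winning upid back to its owner with container_of(); because a pid's upid for a namespace at depth L is always stored at numbers[L], the matching entry is numbers[ns->level]. The old find_pid() export disappears here; elsewhere in the series it and a new find_vpid() survive as thin wrappers over find_pid_ns(), along these lines (sketch, not part of this hunk):

struct pid *find_vpid(int nr)
{
	/* resolve nr in the caller's active pid namespace */
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}

struct pid *find_pid(int nr)
{
	/* resolve nr in the initial namespace, as before */
	return find_pid_ns(nr, &init_pid_ns);
}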
@@ -338,12 +346,13 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
-struct task_struct *find_task_by_pid_type(int type, int nr)
+struct task_struct *find_task_by_pid_type_ns(int type, int nr,
+		struct pid_namespace *ns)
 {
-	return pid_task(find_pid(nr), type);
+	return pid_task(find_pid_ns(nr, ns), type);
 }
 
-EXPORT_SYMBOL(find_task_by_pid_type);
+EXPORT_SYMBOL(find_task_by_pid_type_ns);
 
 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
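Callers that translate a numeric id coming from userspace now have to say which namespace the id is relative to. A minimal, hypothetical example (example_lookup is a made-up name; task_active_pid_ns() and the task refcount helpers are the existing ones):

/* Hypothetical: find the task that the current process knows as vnr,
 * holding RCU as required by the comment above
 * find_task_by_pid_type_ns(), and pin it before dropping the lock. */
static struct task_struct *example_lookup(pid_t vnr)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			task_active_pid_ns(current));
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();

	return tsk;	/* caller releases with put_task_struct() */
}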
@@ -370,7 +379,7 @@ struct pid *find_get_pid(pid_t nr)
 	struct pid *pid;
 
 	rcu_read_lock();
-	pid = get_pid(find_pid(nr));
+	pid = get_pid(find_vpid(nr));
 	rcu_read_unlock();
 
 	return pid;
@@ -394,15 +403,15 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
  *
  * If there is a pid at nr this function is exactly the same as find_pid.
  */
-struct pid *find_ge_pid(int nr)
+struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 {
 	struct pid *pid;
 
 	do {
-		pid = find_pid(nr);
+		pid = find_pid_ns(nr, ns);
 		if (pid)
 			break;
-		nr = next_pidmap(task_active_pid_ns(current), nr);
+		nr = next_pidmap(ns, nr);
 	} while (nr > 0);
 
 	return pid;
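find_ge_pid() is the building block for walking every pid that exists in one namespace (as /proc readdir does): a hit returns the pid, a miss advances nr to the next allocated slot in that namespace's pidmap. A hypothetical iteration sketch built only on functions visible in this file:

/* Hypothetical: visit every pid currently allocated in ns, in
 * numerical order.  pid_nr_ns() reports the id as seen inside ns. */
static void example_walk_pids(struct pid_namespace *ns)
{
	struct pid *pid;
	int nr = 1;

	rcu_read_lock();
	while ((pid = find_ge_pid(nr, ns)) != NULL) {
		/* ... use pid ... */
		nr = pid_nr_ns(pid, ns) + 1;
	}
	rcu_read_unlock();
}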