/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, requires scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful set_bit.
 * Freeing is O(1).
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static struct kmem_cache *pid_cachep;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
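/*
 * Each pidmap page covers BITS_PER_PAGE PIDs: with 4 KiB pages that is
 * 32768 PIDs per page, so the default pid_max (PID_MAX_DEFAULT == 32768)
 * fits in a single bitmap page. mk_pid() below maps a (bitmap page, bit
 * offset) pair back to a PID; e.g. bit 5 of the second page is PID
 * 32768 + 5 = 32773.
 */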
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.child_reaper = &init_task
};
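/*
 * Note that every pidmap entry above starts out with all BITS_PER_PAGE
 * bits free and a NULL page pointer; the backing pages are allocated
 * lazily by alloc_pidmap() on first use, as described in the comment
 * above init_pid_ns.
 */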
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
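/*
 * In free_pidmap() and alloc_pidmap() below, nr_free is only a hint used
 * to skip fully-allocated bitmap pages quickly; the bitmap itself is the
 * authoritative record of which PIDs are in use. A stale nr_free reading
 * just makes the allocator scan (or skip) a page it otherwise wouldn't.
 */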
static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
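	/*
	 * max_scan + 1 is the number of bitmap pages the loop below may
	 * visit: every page covering [0, pid_max), plus one extra pass
	 * when the search starts mid-page (offset != 0), because after
	 * wrapping around, the starting page must be scanned a second
	 * time to cover the bits below the original offset.
	 */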
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
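			/*
			 * The page is allocated before taking pidmap_lock
			 * because GFP_KERNEL may sleep, which is not
			 * allowed while holding a spinlock.
			 */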
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
				/*
				 * find_next_offset() found a zero bit, the
				 * pid derived from it is in-bounds, and if
				 * we fell back to the last bitmap block and
				 * the final block was the same as the
				 * starting point, pid is before last_pid.
				 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
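/*
 * next_pidmap() returns the first allocated PID strictly greater than
 * last, or -1 if there is none. find_ge_pid() below uses it to walk the
 * PID space in ascending order.
 */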
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
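/*
 * The atomic_read() == 1 test in put_pid() below is a fast path: if we
 * hold the only remaining reference, nobody else can be taking a new one
 * (a new reference can only be cloned from an existing one), so the
 * atomic read-modify-write of atomic_dec_and_test() can be skipped.
 */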
fastcall void put_pid(struct pid *pid)
{
	if (!pid)
		return;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count))
		kmem_cache_free(pid_cachep, pid);
}
EXPORT_SYMBOL_GPL(put_pid);
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	hlist_del_rcu(&pid->pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	free_pidmap(&init_pid_ns, pid->nr);
	call_rcu(&pid->rcu, delayed_put_pid);
}
struct pid *alloc_pid(void)
{
	struct pid *pid;
	enum pid_type type;
	int nr = -1;

	pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	nr = alloc_pidmap(current->nsproxy->pid_ns);
	if (nr < 0)
		goto out_free;

	atomic_set(&pid->count, 1);
	pid->nr = nr;
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	kmem_cache_free(pid_cachep, pid);
	pid = NULL;
	goto out;
}
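/*
 * A rough sketch of how these primitives fit together at fork time
 * (illustrative only; the real call sites live in copy_process() and
 * differ in detail):
 *
 *	struct pid *pid = alloc_pid();            reserve PID + hash entry
 *	attach_pid(task, PIDTYPE_PID, pid->nr);   task is now findable
 *
 * find_pid() below walks its hash chain under RCU, so callers must hold
 * rcu_read_lock() (or pidmap_lock) across the lookup.
 */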
struct pid * fastcall find_pid(int nr)
{
	struct hlist_node *elem;
	struct pid *pid;

	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid);
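/*
 * attach_pid() performs no NULL check on the result of find_pid(): the
 * caller must guarantee that a struct pid for nr already exists (set up
 * earlier by alloc_pid()), and is expected to hold the appropriate task
 * list protection (typically write_lock_irq(&tasklist_lock)) while
 * linking the task in.
 */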
int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
	struct pid_link *link;
	struct pid *pid;

	link = &task->pids[type];
	link->pid = pid = find_pid(nr);
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;
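	/* Free the pid once no task of any type references it anymore. */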
	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}
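/*
 * Using hlist_replace_rcu() above means a concurrent RCU reader walking
 * pid->tasks[type] observes either the old task or the new one, never an
 * intermediate state with neither linked in.
 */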
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct,
					     pids[(type)].node);
	}
	return result;
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	return pid_task(find_pid(nr), type);
}
EXPORT_SYMBOL(find_task_by_pid_type);
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();

	return pid;
}
struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
	struct pid *pid;

	do {
		pid = find_pid(nr);
		if (pid)
			break;
		nr = next_pidmap(current->nsproxy->pid_ns, nr);
	} while (nr > 0);

	return pid;
}
int copy_pid_ns(int flags, struct task_struct *tsk)
{
	struct pid_namespace *old_ns = tsk->nsproxy->pid_ns;

	if (!old_ns)
		return 0;

	get_pid_ns(old_ns);
	return 0;
}
void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	kfree(ns);
}
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
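/*
 * For example, on a machine with 128 MiB of kernel pages and 4 KiB pages,
 * megabytes == 128, fls(128 * 4) == fls(512) == 10, so pidhash_shift is
 * clamped to 10 and the table gets 1 << 10 == 1024 entries.
 */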
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size *	sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0). */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	pid_cachep = KMEM_CACHE(pid, SLAB_PANIC);
}