@@ -484,20 +484,6 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
-int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (oldmm)
-		cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
-	else
-		memset(mm_cpumask(mm), 0, cpumask_size());
-#endif
-	return 0;
-}
-
 static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 {
 	atomic_set(&mm->mm_users, 1);
@@ -538,17 +524,8 @@ struct mm_struct * mm_alloc(void)
 		return NULL;
 
 	memset(mm, 0, sizeof(*mm));
-	mm = mm_init(mm, current);
-	if (!mm)
-		return NULL;
-
-	if (mm_init_cpumask(mm, NULL)) {
-		mm_free_pgd(mm);
-		free_mm(mm);
-		return NULL;
-	}
-
-	return mm;
+	mm_init_cpumask(mm);
+	return mm_init(mm, current);
 }
 
 /*
@@ -559,7 +536,6 @@ struct mm_struct * mm_alloc(void)
 void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
-	free_cpumask_var(mm->cpu_vm_mask_var);
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
@@ -753,6 +729,7 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 		goto fail_nomem;
 
 	memcpy(mm, oldmm, sizeof(*mm));
+	mm_init_cpumask(mm);
 
 	/* Initializing for Swap token stuff */
 	mm->token_priority = 0;
@@ -765,9 +742,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
-	if (mm_init_cpumask(mm, oldmm))
-		goto fail_nocpumask;
-
 	if (init_new_context(tsk, mm))
 		goto fail_nocontext;
 
@@ -794,9 +768,6 @@ fail_nomem:
 	return NULL;
 
 fail_nocontext:
-	free_cpumask_var(mm->cpu_vm_mask_var);
-
-fail_nocpumask:
 	/*
 	 * If init_new_context() failed, we cannot use mmput() to free the mm
 	 * because it calls destroy_context()
@@ -1591,6 +1562,13 @@ void __init proc_caches_init(void)
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+	/*
+	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
+	 * whole struct cpumask for the OFFSTACK case. We could change
+	 * this to *only* allocate as much of it as required by the
+	 * maximum number of CPU's we can ever have. The cpumask_allocation
+	 * is at the end of the structure, exactly for that reason.
+	 */
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);