@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
 	/*
-	 * Verify that mmu_notifier_init() already run and the global srcu is
-	 * initialized.
-	 */
+	* Verify that mmu_notifier_init() already run and the global srcu is
+	* initialized.
+	*/
 	BUG_ON(!srcu.per_cpu_ref);
 
+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out;
+		goto out_clean;
 
 	if (!mm_has_notifiers(mm)) {
-		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-					  GFP_KERNEL);
-		if (unlikely(!mmu_notifier_mm)) {
-			ret = -ENOMEM;
-			goto out_of_mem;
-		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
 
 		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
-out_of_mem:
 	mm_drop_all_locks(mm);
-out:
+out_clean:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-
+	kfree(mmu_notifier_mm);
+out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }