@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
-	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
-		goto out;
+		return;
+
+	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	page_unlock_anon_vma(av);
-out:
 	read_unlock(&tasklist_lock);
+	page_unlock_anon_vma(av);
 }
 
 /*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	struct prio_tree_iter iter;
 	struct address_space *mapping = page->mapping;
 
-	/*
-	 * A note on the locking order between the two locks.
-	 * We don't rely on this particular order.
-	 * If you have some other code that needs a different order
-	 * feel free to switch them around. Or add a reverse link
-	 * from mm_struct to task_struct, then this could be all
-	 * done without taking tasklist_lock and looping over all tasks.
-	 */
-
-	read_lock(&tasklist_lock);
 	mutex_lock(&mapping->i_mmap_mutex);
+	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
|
|
|
add_to_kill(tsk, page, vma, to_kill, tkc);
|
|
|
}
|
|
|
}
|
|
|
- mutex_unlock(&mapping->i_mmap_mutex);
|
|
|
read_unlock(&tasklist_lock);
|
|
|
+ mutex_unlock(&mapping->i_mmap_mutex);
|
|
|
}
|
|
|
|
|
|
/*
|
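Not part of the patch itself: for readers skimming the hunks, a sketch of how collect_procs_anon() reads once the changes above are applied, reconstructed from the diff context with the loop body elided. collect_procs_file() follows the same pattern, with mapping->i_mmap_mutex in place of the anon_vma lock: take the page-side lock first, then tasklist_lock, and release them in the reverse order.

static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct task_struct *tsk;
	struct anon_vma *av;

	/* Take the anon_vma lock before tasklist_lock ... */
	av = page_lock_anon_vma(page);
	if (av == NULL)		/* Not actually mapped anymore */
		return;

	/* ... and only then tasklist_lock. */
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		/* walk the anon_vma chains and add_to_kill() matching tasks */
	}
	/* Release in the reverse order of acquisition. */
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma(av);
}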