@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
  */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma;
+	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	anon_vma_lock(anon_vma);
-	return anon_vma;
+	root_anon_vma = ACCESS_ONCE(anon_vma->root);
+	spin_lock(&root_anon_vma->lock);
+
+	/*
+	 * If this page is still mapped, then its anon_vma cannot have been
+	 * freed. But if it has been unmapped, we have no security against
+	 * the anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
+	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
+	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 */
+	if (page_mapped(page))
+		return anon_vma;
+
+	spin_unlock(&root_anon_vma->lock);
 out:
 	rcu_read_unlock();
 	return NULL;
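
For context, a hedged sketch of how a caller pairs the two helpers after this change; example_rmap_walk() is a hypothetical name, not from the patch, and the sketch assumes page_unlock_anon_vma() still releases the root anon_vma lock and the RCU read section, as in the tree this patch applies to:

	/* Hypothetical caller sketch, not part of the patch. */
	static int example_rmap_walk(struct page *page)
	{
		struct anon_vma *anon_vma;

		anon_vma = page_lock_anon_vma(page);	/* root lock + RCU held on success */
		if (!anon_vma)
			return 0;			/* page is no longer anon-mapped */

		/* ... walk the anon_vma's vma list under the root lock ... */

		page_unlock_anon_vma(anon_vma);		/* drops the root lock and RCU */
		return 1;
	}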