@@ -121,7 +121,6 @@ void __remove_from_page_cache(struct page *page)
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
-	mem_cgroup_uncharge_cache_page(page);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -145,6 +144,7 @@ void remove_from_page_cache(struct page *page)
 	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
 }
 
 static int sync_page(void *word)
@@ -476,13 +476,13 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		if (likely(!error)) {
 			mapping->nrpages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
+			spin_unlock_irq(&mapping->tree_lock);
 		} else {
 			page->mapping = NULL;
+			spin_unlock_irq(&mapping->tree_lock);
 			mem_cgroup_uncharge_cache_page(page);
 			page_cache_release(page);
 		}
-
-		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);