@@ -56,7 +56,8 @@ void clear_page_mlock(struct page *page)
 	if (!TestClearPageMlocked(page))
 		return;
 
-	dec_zone_page_state(page, NR_MLOCK);
+	mod_zone_page_state(page_zone(page), NR_MLOCK,
+			    -hpage_nr_pages(page));
 	count_vm_event(UNEVICTABLE_PGCLEARED);
 	if (!isolate_lru_page(page)) {
 		putback_lru_page(page);
@@ -78,7 +79,8 @@ void mlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
@@ -105,7 +107,8 @@ void munlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		dec_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    -hpage_nr_pages(page));
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
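The switch from inc_zone_page_state()/dec_zone_page_state() to mod_zone_page_state() is what makes NR_MLOCK accounting correct for transparent hugepages: an mlocked THP must move the counter by the number of base pages it covers, not by one, and mod_zone_page_state() takes a signed delta so the clear/munlock paths can pass -hpage_nr_pages(page). For reference, hpage_nr_pages() behaved roughly as below at the time; this is a simplified sketch, with the CONFIG_TRANSPARENT_HUGEPAGE guards of the real include/linux/huge_mm.h omitted.

	/*
	 * Sketch: number of base pages backing @page.
	 * Returns HPAGE_PMD_NR (512 on x86-64 with 4K pages) for a
	 * transparent hugepage, and 1 for an ordinary page.
	 */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;
		return 1;
	}

Because the same helper supplies both the positive delta in mlock_vma_page() and the negated delta in clear_page_mlock()/munlock_vma_page(), the counter is decremented by exactly the amount it was incremented, whether the page is a THP or a normal page.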