@@ -150,18 +150,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 }
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__ClearPageMlocked(page);
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -170,7 +158,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */