@@ -146,6 +146,22 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		__count_vm_event(UNEVICTABLE_MLOCKFREED);
+		local_irq_restore(flags);
+	}
+}
 
 #else /* CONFIG_UNEVICTABLE_LRU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
@@ -155,6 +171,7 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_UNEVICTABLE_LRU */
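
The heart of the new helper is an atomic test-and-clear of PG_mlocked followed by counter fixup; the explicit local_irq_save()/local_irq_restore() pair is needed because the __dec_zone_page_state() and __count_vm_event() variants are not interrupt-safe on their own. The standalone C model below illustrates just that test-and-clear-then-account pattern, nothing more: every name in it (page_model, test_clear_mlocked, nr_mlock, ...) is invented for the sketch, and it substitutes C11 atomics for the kernel's IRQ-disabled per-cpu counters.

/*
 * Standalone model of free_page_mlock()'s pattern. All names here are
 * invented for illustration; the kernel's real primitives
 * (TestClearPageMlocked, __dec_zone_page_state, ...) are different and
 * rely on disabled interrupts rather than C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PG_MLOCKED_BIT 0

struct page_model {
	atomic_ulong flags;
};

static atomic_ulong nr_mlock;            /* stands in for NR_MLOCK */
static atomic_ulong mlock_freed_events;  /* stands in for UNEVICTABLE_MLOCKFREED */

/* Atomically clear the mlocked bit, returning nonzero if it was set
 * (the moral equivalent of TestClearPageMlocked()). */
static int test_clear_mlocked(struct page_model *page)
{
	unsigned long old = atomic_fetch_and(&page->flags,
					     ~(1UL << PG_MLOCKED_BIT));
	return (old >> PG_MLOCKED_BIT) & 1UL;
}

/* Mirror of the patch's free_page_mlock(): only touch the counters
 * if this call is the one that actually cleared the bit. */
static void free_page_mlock_model(struct page_model *page)
{
	if (test_clear_mlocked(page)) {
		atomic_fetch_sub(&nr_mlock, 1);
		atomic_fetch_add(&mlock_freed_events, 1);
	}
}

int main(void)
{
	struct page_model page;

	atomic_init(&page.flags, 1UL << PG_MLOCKED_BIT);
	atomic_init(&nr_mlock, 1);

	free_page_mlock_model(&page);   /* clears the bit, fixes counters */
	free_page_mlock_model(&page);   /* second call is a no-op */

	printf("nr_mlock=%lu, freed events=%lu\n",
	       (unsigned long)atomic_load(&nr_mlock),
	       (unsigned long)atomic_load(&mlock_freed_events));
	return 0;
}

The test-and-clear is what makes the accounting safe against a concurrent freer: whichever caller actually observes the bit set is the only one that decrements NR_MLOCK, so the second call in the model above changes nothing.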