@@ -271,27 +271,94 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page,
 }
 
 /*
- * FIXME: speed this up?
+ * A page will go to the active list either by activate_page or
+ * putback_active_lru_page.  In the activate_page case, the page does not
+ * have the active bit set yet.  The page might not be in the LRU list
+ * because it was isolated before it got a chance to be moved to the active
+ * list.  That window is small because a pagevec only stores several pages.
+ * In that case, we do nothing for the page.
+ * In the putback_active_lru_page case, the page is not in the LRU list but
+ * has the active bit set.
  */
-void activate_page(struct page *page)
+static void __activate_page(struct page *page, void *arg)
 {
 	struct zone *zone = page_zone(page);
+	int file = page_is_file_cache(page);
+	int lru = page_lru_base_type(page);
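+	/* a page that is not on the LRU here came in via the putback path */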
+	bool putback = !PageLRU(page);
 
-	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
-		int lru = page_lru_base_type(page);
+	/* The page was isolated before it could be moved to the active list */
+	if (!PageLRU(page) && !PageActive(page))
+		return;
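+	/* already active and on the LRU, or unevictable: nothing to do */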
+	if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page))
+		return;
+
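+	/*
+	 * A page still on the LRU must leave its inactive list first; an
+	 * isolated page only needs its LRU bit restored.
+	 */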
+	if (!putback)
 		del_page_from_lru_list(zone, page, lru);
+	else
+		SetPageLRU(page);
 
-		SetPageActive(page);
-		lru += LRU_ACTIVE;
-		add_page_to_lru_list(zone, page, lru);
-		__count_vm_event(PGACTIVATE);
+	SetPageActive(page);
+	lru += LRU_ACTIVE;
+	add_page_to_lru_list(zone, page, lru);
+
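+	/* the putback path only restores LRU state, it is not a new activation */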
+	if (putback)
+		return;
+	__count_vm_event(PGACTIVATE);
+	update_page_reclaim_stat(zone, page, file, 1);
+}
+
+#ifdef CONFIG_SMP
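+/* per-CPU buffer of pages waiting to be moved to the active list */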
+static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+
+static void activate_page_drain(int cpu)
+{
+	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, __activate_page, NULL);
+}
+
+void activate_page(struct page *page)
+{
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
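+		/* hold a reference; it is dropped when the pagevec is drained */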
+		page_cache_get(page);
+		if (!pagevec_add(pvec, page))
+			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+		put_cpu_var(activate_page_pvecs);
+	}
+}
 
-		update_page_reclaim_stat(zone, page, file, 1);
+/* Caller should hold zone->lru_lock; it may be dropped and retaken here */
+int putback_active_lru_page(struct zone *zone, struct page *page)
+{
+	struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
+	if (!pagevec_add(pvec, page)) {
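+		/*
+		 * pagevec_lru_move_fn takes zone->lru_lock itself, so the
+		 * lock must be dropped across the drain.
+		 */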
+		spin_unlock_irq(&zone->lru_lock);
+		pagevec_lru_move_fn(pvec, __activate_page, NULL);
+		spin_lock_irq(&zone->lru_lock);
 	}
+	put_cpu_var(activate_page_pvecs);
+	return 1;
+}
+
+#else
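+/* on UP there is no cross-CPU contention, so take the lock directly */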
+static inline void activate_page_drain(int cpu)
+{
+}
+
+void activate_page(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+
+	spin_lock_irq(&zone->lru_lock);
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page))
+		__activate_page(page, NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
+#endif
 
 /*
  * Mark a page as having seen activity.
@@ -390,6 +457,7 @@ static void drain_cpu_pagevecs(int cpu)
 		pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
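+	/* also flush this CPU's pending page activations */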
+	activate_page_drain(cpu);
 }
 
 void lru_add_drain(void)