@@ -7,115 +7,77 @@
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
 /*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
+ * Lightweight per cpu counter implementation.
  *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context. In this case, the field should be
- *   commented here.
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
  */
-struct page_state {
-	unsigned long pgpgin;		/* Disk reads */
-	unsigned long pgpgout;		/* Disk writes */
-	unsigned long pswpin;		/* swap reads */
-	unsigned long pswpout;		/* swap writes */
-
-	unsigned long pgalloc_high;	/* page allocations */
-	unsigned long pgalloc_normal;
-	unsigned long pgalloc_dma32;
-	unsigned long pgalloc_dma;
-
-	unsigned long pgfree;		/* page freeings */
-	unsigned long pgactivate;	/* pages moved inactive->active */
-	unsigned long pgdeactivate;	/* pages moved active->inactive */
-
-	unsigned long pgfault;		/* faults (major+minor) */
-	unsigned long pgmajfault;	/* faults (major only) */
-
-	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
-	unsigned long pgrefill_normal;
-	unsigned long pgrefill_dma32;
-	unsigned long pgrefill_dma;
-
-	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
-	unsigned long pgsteal_normal;
-	unsigned long pgsteal_dma32;
-	unsigned long pgsteal_dma;
-
-	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-	unsigned long pgscan_kswapd_normal;
-	unsigned long pgscan_kswapd_dma32;
-	unsigned long pgscan_kswapd_dma;
-
-	unsigned long pgscan_direct_high;/* total highmem pages scanned */
-	unsigned long pgscan_direct_normal;
-	unsigned long pgscan_direct_dma32;
-	unsigned long pgscan_direct_dma;
-
-	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
-	unsigned long slabs_scanned;	/* slab objects scanned */
-	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
-	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
-	unsigned long allocstall;	/* direct reclaim calls */
-
-	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
+
+#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+		FOR_ALL_ZONES(PGALLOC),
+		PGFREE, PGACTIVATE, PGDEACTIVATE,
+		PGFAULT, PGMAJFAULT,
+		FOR_ALL_ZONES(PGREFILL),
+		FOR_ALL_ZONES(PGSTEAL),
+		FOR_ALL_ZONES(PGSCAN_KSWAPD),
+		FOR_ALL_ZONES(PGSCAN_DIRECT),
+		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+		NR_VM_EVENT_ITEMS
+};
+
+struct vm_event_state {
+	unsigned long event[NR_VM_EVENT_ITEMS];
 };
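
(To illustrate the construction above: the following standalone userspace
sketch, which is not part of the patch, shows how FOR_ALL_ZONES()
token-pastes one enumerator per zone and how the terminating
NR_VM_EVENT_ITEMS entry sizes the per cpu counter array.)

	#include <stdio.h>

	#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH

	/* FOR_ALL_ZONES(PGALLOC) expands to
	 * PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH */
	enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
			FOR_ALL_ZONES(PGALLOC),
			NR_VM_EVENT_ITEMS };	/* sizes the array; not an event */

	struct vm_event_state {
		unsigned long event[NR_VM_EVENT_ITEMS];
	};

	int main(void)
	{
		struct vm_event_state s = { { 0 } };

		s.event[PGALLOC_NORMAL]++;	/* one ZONE_NORMAL page allocation */
		printf("%d event items, PGALLOC_NORMAL = %lu\n",
		       NR_VM_EVENT_ITEMS, s.event[PGALLOC_NORMAL]);
		return 0;
	}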
 
-extern void get_full_page_state(struct page_state *ret);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define mod_page_state(member, delta)	\
-	mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)	\
-	__mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)		mod_page_state(member, 1UL)
-#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)	mod_page_state(member, (delta))
-#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)	__mod_page_state(member, 1UL)
-#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
-#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member)	\
-({	\
-	unsigned offset;	\
-	if (is_highmem(zone))	\
-		offset = offsetof(struct page_state, member##_high);	\
-	else if (is_normal(zone))	\
-		offset = offsetof(struct page_state, member##_normal);	\
-	else if (is_dma32(zone))	\
-		offset = offsetof(struct page_state, member##_dma32);	\
-	else	\
-		offset = offsetof(struct page_state, member##_dma);	\
-	offset;	\
-})
-
-#define __mod_page_state_zone(zone, member, delta)	\
-	do {	\
-		__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-	} while (0)
-
-#define mod_page_state_zone(zone, member, delta)	\
-	do {	\
-		mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-	} while (0)
-
-DECLARE_PER_CPU(struct page_state, page_states);
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+	__get_cpu_var(vm_event_states.event[item])++;
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+	get_cpu_var(vm_event_states.event[item])++;
+	put_cpu();
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+	__get_cpu_var(vm_event_states.event[item]) += delta;
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+	get_cpu_var(vm_event_states.event[item]) += delta;
+	put_cpu();
+}
+
+extern void all_vm_events(unsigned long *);
+extern void vm_events_fold_cpu(int cpu);
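
(How call sites are expected to use these; a hedged sketch, not code from
this patch. nr_reclaimed is a stand-in variable.)

	/* From preemptible context use the plain variants; count_vm_event()
	 * pins the task to its cpu via get_cpu_var()/put_cpu() around the
	 * increment: */
	count_vm_event(PGFAULT);

	/* Code that already runs pinned, e.g. with interrupts disabled or
	 * under preempt_disable(), can use the cheaper __ variants: */
	local_irq_disable();
	__count_vm_events(PGSTEAL_NORMAL, nr_reclaimed);
	local_irq_enable();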
+
+#else
+
+/* Disable counters */
+#define get_cpu_vm_events(e)		0L
+#define count_vm_event(e)		do { } while (0)
+#define count_vm_events(e,d)		do { } while (0)
+#define __count_vm_event(e)		do { } while (0)
+#define __count_vm_events(e,d)		do { } while (0)
+#define vm_events_fold_cpu(x)		do { } while (0)
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
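
(Because the stubs above are complete do-nothing statements, call sites
never need an #ifdef CONFIG_VM_EVENT_COUNTERS of their own; a sketch:)

	/* With the option off, the macro swallows its argument unevaluated,
	 * so even the vm_event_item enum, which is compiled out as well,
	 * need not exist at the call site: */
	count_vm_event(PGFAULT);	/* expands to do { } while (0) */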
+
+#define __count_zone_vm_events(item, zone, delta) \
+		__count_vm_events(item##_DMA + zone_idx(zone), delta)
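
(The zone-indexed wrapper depends on FOR_ALL_ZONES() listing its
enumerators in zone order; a worked expansion, assuming the usual
ZONE_DMA=0 .. ZONE_HIGHMEM=3 numbering:)

	/* __count_zone_vm_events(PGALLOC, zone, 1) expands to
	 *
	 *	__count_vm_events(PGALLOC_DMA + zone_idx(zone), 1);
	 *
	 * For a ZONE_NORMAL zone zone_idx() returns 2, and because
	 * FOR_ALL_ZONES(PGALLOC) emitted PGALLOC_DMA, PGALLOC_DMA32,
	 * PGALLOC_NORMAL, PGALLOC_HIGH in that same order,
	 * PGALLOC_DMA + 2 is exactly PGALLOC_NORMAL. */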
 
 /*
  * Zone based page accounting with per cpu differentials.