vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
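/*
 * Illustrative expansion (not part of the original header): with
 * CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all enabled,
 *
 *        FOR_ALL_ZONES(PGALLOC)
 *                => PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL,
 *                   PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one event item per configured zone type, in zone order. The
 * helper macros for disabled zones expand to nothing, so the enum
 * below stays dense.
 */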
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
        FOR_ALL_ZONES(PGALLOC),
        PGFREE, PGACTIVATE, PGDEACTIVATE,
        PGFAULT, PGMAJFAULT,
        FOR_ALL_ZONES(PGREFILL),
        FOR_ALL_ZONES(PGSTEAL),
        FOR_ALL_ZONES(PGSCAN_KSWAPD),
        FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
        PGSCAN_ZONE_RECLAIM_FAILED,
#endif
        PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
        KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
        KSWAPD_SKIP_CONGESTION_WAIT,
        PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
        COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
        COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
        HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
        UNEVICTABLE_PGCULLED,    /* culled to noreclaim list */
        UNEVICTABLE_PGSCANNED,   /* scanned for reclaimability */
        UNEVICTABLE_PGRESCUED,   /* rescued from noreclaim list */
        UNEVICTABLE_PGMLOCKED,
        UNEVICTABLE_PGMUNLOCKED,
        UNEVICTABLE_PGCLEARED,   /* on COW, page truncate */
        UNEVICTABLE_PGSTRANDED,  /* unable to isolate on unlock */
        UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        THP_FAULT_ALLOC,
        THP_FAULT_FALLBACK,
        THP_COLLAPSE_ALLOC,
        THP_COLLAPSE_ALLOC_FAILED,
        THP_SPLIT,
#endif
        NR_VM_EVENT_ITEMS
};
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
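/*
 * Illustrative sketch (not part of the original header; lock, flags and
 * order are placeholders): callers in ordinary process context use the
 * preempt-safe wrappers, while the __ variants are for contexts that
 * have already disabled preemption or interrupts:
 *
 *        count_vm_event(PGFAULT);
 *
 *        spin_lock_irqsave(&lock, flags);
 *        __count_vm_events(PGFREE, 1 << order);
 *        spin_unlock_irqrestore(&lock, flags);
 *
 * With CONFIG_VM_EVENT_COUNTERS disabled, the helpers above compile to
 * empty functions, so callers need no #ifdefs of their own.
 */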
#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
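/*
 * Worked example (illustrative, assuming the zones mentioned are all
 * configured): the FOR_ALL_ZONES() items sit in the enum in the same
 * order as the zone indices, so the event item for a given zone is
 * found by offsetting from the _NORMAL item. For a highmem zone
 * (zone_idx(zone) == ZONE_HIGHMEM):
 *
 *        __count_zone_vm_events(PGALLOC, zone, 1)
 *                => __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL
 *                                        + ZONE_HIGHMEM, 1)
 *                => __count_vm_events(PGALLOC_HIGH, 1)
 */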
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
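/*
 * Illustrative note (not in the original header): under SMP the per-cpu
 * differentials may not have been folded back yet, so the atomic counter
 * can transiently read negative; the readers above clamp to 0 rather
 * than return a huge unsigned value. Typical uses:
 *
 *        unsigned long free = global_page_state(NR_FREE_PAGES);
 *        unsigned long file = zone_page_state(zone, NR_FILE_PAGES);
 *
 * Both cost a single atomic_long_read(), but may lag the true value by
 * roughly the per-cpu stat thresholds.
 */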
/*
 * More accurate version that also takes the currently pending per-cpu
 * deltas into account. For that we need to loop over all cpus and
 * collect their deltas. There is no synchronization, so the result is
 * still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
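/*
 * Illustrative sketch (not in the original header; min_wmark is a
 * placeholder): the snapshot variant suits correctness-sensitive slow
 * paths, e.g. double-checking that a zone really is below a watermark:
 *
 *        if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < min_wmark)
 *                treat the zone as short on memory;
 *
 * It walks every online cpu, so it is considerably more expensive than
 * zone_page_state() and does not belong in fast paths.
 */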
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */
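/*
 * Illustrative note (not in the original header; nid is a placeholder
 * for a valid node id): node_page_state() is just the sum of
 * zone_page_state() over every zone type the node can have, with the
 * #ifdef chain making disabled zone types cost nothing. E.g. the
 * anonymous memory held by one node:
 *
 *        unsigned long anon = node_page_state(nid, NR_ANON_PAGES);
 *
 * On !CONFIG_NUMA kernels there is only one node, so the macro falls
 * back to the global counter.
 */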
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                                enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                                enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                                enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */
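/*
 * Illustrative note (not in the original header; nr_freed is a
 * placeholder): on SMP the __ variants batch updates into per-cpu diffs
 * and rely on the caller already running with preemption or interrupts
 * disabled, while the plain variants take care of that themselves.
 * A typical update:
 *
 *        mod_zone_page_state(zone, NR_FILE_PAGES, -nr_freed);
 *
 * On UP both flavours collapse to the same direct atomic update, which
 * is why the #defines above simply alias one to the other.
 */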
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */