/* vmstat.h */

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGALLOC),
                PGFREE, PGACTIVATE, PGDEACTIVATE,
                PGFAULT, PGMAJFAULT,
                FOR_ALL_ZONES(PGREFILL),
                FOR_ALL_ZONES(PGSTEAL),
                FOR_ALL_ZONES(PGSCAN_KSWAPD),
                FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
                PGSCAN_ZONE_RECLAIM_FAILED,
#endif
                PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
                KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
                KSWAPD_SKIP_CONGESTION_WAIT,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
                COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
#endif
#ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
                UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
                UNEVICTABLE_PGMLOCKED,
                UNEVICTABLE_PGMUNLOCKED,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
                NR_VM_EVENT_ITEMS
};
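
/*
 * These events back the counters exported through /proc/vmstat;
 * mm/vmstat.c prints them in the order they are declared here.
 */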

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
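
/*
 * Usage sketch (illustrative, not part of the original header): a fault
 * path accounts a single event with
 *
 *      count_vm_event(PGMAJFAULT);
 *
 * and batched callers use count_vm_events(PGFREE, nr_pages). The
 * __count_* variants use the non-preempt-safe per-CPU operations, so
 * they are only safe when the caller already runs with preemption
 * disabled, e.g. under a spinlock with interrupts off.
 */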

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
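
/*
 * Example (illustrative): the item name is offset by the zone index, so
 * on an allocation from a DMA32 zone
 *
 *      __count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * lands on PGALLOC_DMA32. This works because FOR_ALL_ZONES() declares
 * the per-zone items in the same order as the zone indices in
 * enum zone_type.
 */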

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
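
/*
 * Note: on SMP the summed value can transiently go negative because
 * recent updates may still sit in other CPUs' unfolded differentials,
 * hence the clamp to zero above.
 */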

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}
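
/*
 * Example (illustrative): a NUMA-aware caller reads a per-node figure
 * such as the number of free pages with
 *
 *      unsigned long free = node_page_state(nid, NR_FREE_PAGES);
 */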

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
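
/*
 * Usage sketch (illustrative, not part of the original header): page
 * cache insertion accounts the new page with
 *
 *      __inc_zone_page_state(page, NR_FILE_PAGES);
 *
 * On SMP this goes through the per-CPU differential; on UP the aliases
 * above make it hit the atomic counters directly.
 */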

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */