vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
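
/*
 * With all zone types configured, FOR_ALL_ZONES(PGALLOC) expands to
 * PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE,
 * i.e. one event item per configured zone type, in zone order.
 */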

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
#endif
		NR_VM_EVENT_ITEMS
};

extern const struct seq_operations fragmentation_op;
extern const struct seq_operations pagetypeinfo_op;
extern const struct seq_operations zoneinfo_op;
extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
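
/*
 * The double-underscore variants assume the caller has already disabled
 * preemption (or is otherwise pinned to a CPU); the plain variants use
 * get_cpu_var()/put_cpu() to disable and re-enable preemption around the
 * per-CPU increment.
 */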
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
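
/*
 * Typical usage (illustrative): the page fault path bumps the PGFAULT
 * event with count_vm_event(PGFAULT), while batched paths can account
 * several events at once, e.g. count_vm_events(PGPGIN, nr_pages).
 */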

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
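
/*
 * The index arithmetic above relies on the FOR_ALL_ZONES() items being
 * declared in the same order as the zone types in enum zone_type, so
 * item##_NORMAL - ZONE_NORMAL + zone_idx(zone) selects the event item
 * matching the given zone. For example, __count_zone_vm_events(PGALLOC,
 * zone, 1) on a ZONE_HIGHMEM zone resolves to the PGALLOC_HIGH item.
 */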

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
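
/*
 * Under SMP the per-CPU differentials may not yet have been folded back
 * into the atomic counters, so a sum can transiently appear negative;
 * the readers below clamp such values to zero.
 */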
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
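
/*
 * zone_statistics() is expected to record NUMA allocation placement
 * (hit/miss/local/other counters) given the preferred zone and the zone
 * the allocation actually came from; see mm/vmstat.c for the details.
 */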
extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
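
/*
 * These are thin convenience wrappers; for example,
 * sub_zone_page_state(zone, NR_FREE_PAGES, 1 << order) is simply
 * mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)).
 */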

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */