vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
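
/*
 * Illustrative usage (not part of the original header): callers pass an item
 * from enum vm_event_item. The exact call sites below are cited from memory,
 * but the page fault and page free paths do roughly
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 *
 * The double-underscore variants rely on the caller having already made
 * per-CPU access safe (e.g. interrupts or preemption disabled); the plain
 * variants are safe from any context.
 */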
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
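
/*
 * Illustrative expansion (not part of the original header): the page
 * allocator counts per-zone allocation events with something like
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * which works because the PGALLOC_* items are laid out in zone order in
 * enum vm_event_item, so item##_NORMAL - ZONE_NORMAL + zone_idx(zone)
 * selects the entry matching the zone.
 */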
/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
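
/*
 * Illustrative usage (not part of the original header): readers choose the
 * global or the per-zone view; for example, the nr_free_pages() helper in
 * <linux/swap.h> is essentially global_page_state(NR_FREE_PAGES). Because
 * per-CPU differentials may not have been folded back yet, both readers
 * clamp transiently negative sums to zero on SMP.
 */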
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
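
/*
 * Illustrative note (not part of the original header): the snapshot form is
 * used where a stale value could matter, e.g. zone_watermark_ok_safe() in
 * mm/page_alloc.c re-reads NR_FREE_PAGES this way once a zone drops below
 * its percpu_drift_mark. It walks every online CPU, so it is far more
 * expensive than zone_page_state() and is not meant for fast paths.
 */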
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */
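
/*
 * Illustrative usage (not part of the original header): per-node memory
 * reporting sums the zones this way, e.g. the sysfs NUMA meminfo code uses
 * calls along the lines of
 *
 *	node_page_state(nid, NR_FILE_PAGES);
 *	node_page_state(nid, NR_ANON_PAGES);
 *
 * On !CONFIG_NUMA kernels the node_page_state() macro above simply falls
 * back to the global counter.
 */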
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
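
/*
 * Illustrative usage (not part of the original header): kswapd is the main
 * user of the threshold switching, roughly
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim while the node is under memory pressure ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 *
 * Tighter thresholds reduce per-CPU counter drift while watermarks are
 * being checked closely, at the cost of more frequent global updates.
 */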
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
				enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
				enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
				enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We use only atomic operations to update counters, so there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
				struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
				int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
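
/*
 * Illustrative usage (not part of the original header): the buddy allocator
 * adjusts the free page counters with positive deltas when pages are freed
 * and negative deltas when they are allocated, e.g. roughly
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * keeping NR_FREE_CMA_PAGES in step with NR_FREE_PAGES for CMA pageblocks.
 */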
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */