vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
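
/*
 * Usage sketch (illustrative, not part of the original header):
 * count_vm_event() may be called from any context, while the __ variant
 * assumes the caller already runs with preemption (or interrupts)
 * disabled, e.g. under a spinlock or in interrupt context:
 *
 *	count_vm_event(PGFAULT);	(safe anywhere)
 *
 *	spin_lock_irq(&lock);		(hypothetical lock)
 *	__count_vm_event(PGACTIVATE);
 *	spin_unlock_irq(&lock);
 */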

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do {} while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
	zone_idx(zone), delta)
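
/*
 * Example (illustrative): the zoned event items are declared in zone
 * order in vm_event_item.h, so the arithmetic above selects the item
 * matching the zone. For a ZONE_DMA zone,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * becomes __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL +
 * zone_idx(zone), 1 << order), i.e. it bumps PGALLOC_DMA.
 */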

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
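
/*
 * Note (added for clarity): under SMP the per-CPU deltas may not have
 * been folded into the atomic counters yet, so the sums above can go
 * transiently negative; clamping to zero avoids returning a huge
 * wrapped-around unsigned value.
 */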

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
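
/*
 * Usage sketch (illustrative): the plain reader is cheap but may lag
 * behind by the pending per-CPU deltas; the snapshot walks every online
 * CPU and is meant for slow paths that decide on small margins, such as
 * watermark checks under memory pressure:
 *
 *	unsigned long fast  = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long exact = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */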

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
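
/*
 * Example (illustrative): free pages on node 0, summed over all of the
 * node's zones; any zone type that is not configured (e.g. ZONE_DMA32
 * on 32-bit x86) simply drops out of the sum at compile time:
 *
 *	unsigned long nr_free = node_page_state(0, NR_FREE_PAGES);
 */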

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
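
/*
 * Usage sketch (assumption: mirrors how kswapd drives this interface):
 * before reclaim work, a node's per-CPU stat thresholds are tightened so
 * that counter drift cannot hide memory pressure, and they are relaxed
 * again afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */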

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }

#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
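
/*
 * Example (illustrative): a buddy-allocator free path accounts an
 * order-sized block in one go; pages from a CMA pageblock are tracked
 * in NR_FREE_CMA_PAGES as well:
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 */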

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */