#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};
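
/*
 * For illustration (not in the original header): FOR_ALL_ZONES(PGALLOC)
 * above expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * so every zoned event gets one enum entry per zone, in zone order.
 */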

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */
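
/*
 * Example (not in the original header, illustration only): a caller
 * would bump an event counter like this. count_vm_event() pins the cpu
 * via get_cpu_var()/put_cpu(); the __ variants assume the caller already
 * runs with preemption disabled. With event counters configured out,
 * both compile away to nothing.
 */
static inline void vmstat_example_count_fault(void)
{
	count_vm_event(PGFAULT);
	__count_vm_events(PGPGIN, 8);	/* add several units at once */
}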

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
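
/*
 * Example expansion (illustrative only):
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1)
 *
 * becomes
 *
 *	__count_vm_events(PGREFILL_DMA + zone_idx(zone), 1)
 *
 * which relies on the per-zone items being declared in zone order by
 * FOR_ALL_ZONES() above.
 */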

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Per cpu differentials may not have been folded back yet, so
	 * the sum can be transiently negative. Report zero instead.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
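
/*
 * Example (not in the original header): reading an approximate global
 * count, assuming an item such as NR_FILE_MAPPED from linux/mmzone.h.
 * On SMP the value may be slightly stale because per cpu differentials
 * have not been folded back yet; that is also why negative sums are
 * clamped to zero above.
 */
static inline unsigned long vmstat_example_mapped_pages(void)
{
	return global_page_state(NR_FILE_MAPPED);
}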

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */
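
/*
 * Example (illustrative only): on !CONFIG_NUMA, node_page_state() falls
 * back to the global counters, so callers may use it unconditionally.
 */
static inline unsigned long vmstat_example_node_mapped(int node)
{
	return node_page_state(node, NR_FILE_MAPPED);
}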

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }

#endif /* CONFIG_SMP */
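
/*
 * Example (not in the original header): adjusting a zone counter by a
 * delta, assuming an item such as NR_FILE_PAGES from linux/mmzone.h.
 * The __ variants are for callers that already run with interrupts or
 * preemption disabled, as required by the SMP implementation in
 * mm/vmstat.c; the plain variants may be used from any context.
 */
static inline void vmstat_example_account(struct zone *zone, int nr_pages)
{
	add_zone_page_state(zone, NR_FILE_PAGES, nr_pages);
	sub_zone_page_state(zone, NR_FILE_PAGES, nr_pages);
}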

#endif /* _LINUX_VMSTAT_H */