#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
 * Global page accounting.  One instance per CPU.  Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (the update is protected against concurrent
 *   modification from interrupt context).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context.  In such a case, the field should be
 *   commented here.
 */
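/*
 * A minimal usage sketch (illustrative only; the accessor macros are
 * defined further down and the field names come from struct page_state):
 *
 *	inc_page_state(pgfault);		interrupt-safe, any context
 *
 *	local_irq_save(flags);
 *	__inc_page_state(pgrotated);		interrupts already excluded
 *	local_irq_restore(flags);
 */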
struct page_state {
	unsigned long nr_dirty;		/* Dirty writeable pages */
	unsigned long nr_writeback;	/* Pages under writeback */
	unsigned long nr_unstable;	/* NFS unstable pages */
	unsigned long nr_page_table_pages;/* Pages used for pagetables */
	unsigned long nr_mapped;	/* mapped into pagetables.
					 * only modified from process context */
	unsigned long nr_slab;		/* In slab */
#define GET_PAGE_STATE_LAST nr_slab

	/*
	 * The below are zeroed by get_page_state().  Use
	 * get_full_page_state() to add up all these.
	 */
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */
	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high;/* total highmem pages scanned */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
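/*
 * Illustrative sketch (not part of the original header): get_page_state()
 * fills in the fields up to GET_PAGE_STATE_LAST and zeroes the rest, so
 * use get_full_page_state() when the event counters below it are wanted:
 *
 *	struct page_state ps;
 *
 *	get_full_page_state(&ps);
 *	printk("pgpgin %lu pgpgout %lu\n", ps.pgpgin, ps.pgpgout);
 */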
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
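/*
 * The three helpers above address a counter by its byte offset within
 * struct page_state; the macros below compute that offset with offsetof()
 * so callers can name the field directly, e.g. read_page_state(nr_dirty).
 */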
#define read_page_state(member) \
	read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)	\
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta)	\
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))
#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member, delta)	mod_page_state(member, (delta))
#define sub_page_state(member, delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member, delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member, delta)	__mod_page_state(member, 0UL - (delta))
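/*
 * Note: decrements are written as the unsigned complement (0UL - delta)
 * rather than a signed negative because the counters are unsigned longs;
 * the modular wrap-around makes the addition behave as a subtraction.
 */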
/*
 * Declaration assumed from the macro below, which dereferences the return
 * value: __page_state() yields a pointer to the named field in this CPU's
 * page_state instance (defined out of line with the helpers above).
 */
extern unsigned long *__page_state(unsigned long offset);

#define page_state(member)	(*__page_state(offsetof(struct page_state, member)))
#define state_zone_offset(zone, member)					\
({									\
	unsigned offset;						\
	if (is_highmem(zone))						\
		offset = offsetof(struct page_state, member##_high);	\
	else if (is_normal(zone))					\
		offset = offsetof(struct page_state, member##_normal);	\
	else if (is_dma32(zone))					\
		offset = offsetof(struct page_state, member##_dma32);	\
	else								\
		offset = offsetof(struct page_state, member##_dma);	\
	offset;								\
})
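/*
 * Illustrative expansion (assuming a highmem zone):
 *
 *	state_zone_offset(zone, pgsteal)
 *		=> offsetof(struct page_state, pgsteal_high)
 *
 * The member##_xxx token pasting requires that the member passed in has
 * _high/_normal/_dma32/_dma variants in struct page_state, and the zone
 * classifiers is_highmem()/is_normal()/is_dma32() from <linux/mmzone.h>
 * must be in scope where the macro is expanded.
 */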
#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
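/*
 * A usage sketch (illustrative; reclaim code might do):
 *
 *	mod_page_state_zone(zone, pgsteal, nr_reclaimed);
 *
 * which, via state_zone_offset(), bumps pgsteal_high, pgsteal_normal,
 * pgsteal_dma32 or pgsteal_dma depending on the zone's type.
 */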
DECLARE_PER_CPU(struct page_state, page_states);

#endif /* _LINUX_VMSTAT_H */