kernel_stat.h

#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches, ...),
 * used by rstatd/perfmeter.
 */

/* Per-CPU time accounting, mirroring the per-CPU fields of /proc/stat. */
struct cpu_usage_stat {
        cputime64_t user;       /* time in user mode */
        cputime64_t nice;       /* time in user mode at low (nice) priority */
        cputime64_t system;     /* time in kernel mode */
        cputime64_t softirq;    /* time servicing softirqs */
        cputime64_t irq;        /* time servicing hard interrupts */
        cputime64_t idle;       /* idle time */
        cputime64_t iowait;     /* idle time while waiting for I/O */
        cputime64_t steal;      /* involuntary wait (time stolen by the hypervisor) */
        cputime64_t guest;      /* time running a guest */
        cputime64_t guest_nice; /* time running a niced guest */
};

struct kernel_stat {
        struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];         /* per-IRQ counts on this CPU */
#endif
        unsigned long irqs_sum;             /* total hard interrupts on this CPU */
        unsigned int softirqs[NR_SOFTIRQS]; /* per-vector softirq counts */
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)
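/*
 * Illustrative sketch, not part of the original header: how a reader such
 * as fs/proc/stat.c can fold the per-CPU counters into a system-wide total
 * via kstat_cpu(). The helper name example_total_user_time() is invented
 * for this example.
 */
static inline cputime64_t example_total_user_time(void)
{
        cputime64_t user = cputime64_zero;
        int cpu;

        /* Walk every possible CPU and accumulate its user-mode time. */
        for_each_possible_cpu(cpu)
                user = cputime64_add(user, kstat_cpu(cpu).cpustat.user);

        return user;
}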
extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS
struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
{
        __this_cpu_inc(kstat.irqs[irq]);
        __this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_incr_irqs_this_cpu(irqno, DESC)           \
do {                                                    \
        __this_cpu_inc(*(DESC)->kstat_irqs);            \
        __this_cpu_inc(kstat.irqs_sum);                 \
} while (0)
#endif
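/*
 * Illustrative sketch, not part of the original header: the generic IRQ
 * flow handlers in kernel/irq/chip.c bump these counters on the CPU that
 * takes the interrupt, roughly as below (heavily simplified; the helper
 * name is invented for this example).
 */
static inline void example_handle_irq_event(unsigned int irq,
                                            struct irq_desc *desc)
{
        /* Charge this interrupt to the local CPU's statistics. */
        kstat_incr_irqs_this_cpu(irq, desc);
        /* ... acknowledge the irq_chip and run the action handlers ... */
}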
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
        __this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).softirqs[irq];
}
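/*
 * Illustrative sketch, not part of the original header: a consumer in the
 * style of fs/proc/softirqs.c can total one softirq vector across CPUs
 * with kstat_softirqs_cpu(). The helper name is invented for this example.
 */
static inline unsigned int example_softirq_total(unsigned int vec_nr)
{
        unsigned int sum = 0;
        int cpu;

        /* Sum the per-CPU counts for this softirq vector. */
        for_each_possible_cpu(cpu)
                sum += kstat_softirqs_cpu(vec_nr, cpu);

        return sum;
}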
/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif
/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
        return kstat_cpu(cpu).irqs_sum;
}
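/*
 * Illustrative sketch, not part of the original header: fs/proc/stat.c
 * builds the system-wide "intr" total roughly like this (simplified; the
 * real code also adds architecture-specific interrupt counts). The helper
 * name is invented for this example.
 */
static inline u64 example_total_hardirqs(void)
{
        u64 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_cpu_irqs_sum(cpu);

        return sum;
}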
/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
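/*
 * Illustrative sketch, not part of the original header: the periodic timer
 * tick charges the running task through account_process_tick(), roughly as
 * update_process_times() in kernel/timer.c does (heavily simplified; the
 * helper name is invented for this example).
 */
static inline void example_tick_accounting(int user_tick)
{
        /* Charge one tick to the current task, as user or system time. */
        account_process_tick(current, user_tick);
        /* ... run_local_timers(), scheduler_tick(), etc. ... */
}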
#endif /* _LINUX_KERNEL_STAT_H */