stat.c

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
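
/*
 * Idle and iowait accounting: when the architecture defines arch_idle_time(),
 * the per-cpu cpustat counters plus the arch hook are used; otherwise the
 * NO_HZ time reported by the tick code is used, falling back to cpustat when
 * that time is unavailable.
 */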
#ifdef arch_idle_time

static cputime64_t get_idle_time(int cpu)
{
        cputime64_t idle;

        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
                idle += arch_idle_time(cpu);
        return idle;
}

static cputime64_t get_iowait_time(int cpu)
{
        cputime64_t iowait;

        iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        if (cpu_online(cpu) && nr_iowait_cpu(cpu))
                iowait += arch_idle_time(cpu);
        return iowait;
}

#else

static u64 get_idle_time(int cpu)
{
        u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

        if (idle_time == -1ULL)
                /* !NO_HZ so we can rely on cpustat.idle */
                idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        else
                idle = usecs_to_cputime64(idle_time);

        return idle;
}

static u64 get_iowait_time(int cpu)
{
        u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);

        if (iowait_time == -1ULL)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        else
                iowait = usecs_to_cputime64(iowait_time);

        return iowait;
}

#endif
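
/*
 * show_stat() generates the whole of /proc/stat: the aggregate "cpu" line,
 * one "cpuN" line per online CPU, the interrupt and softirq counters, the
 * boot time, and the fork, context-switch and runnable/blocked process
 * counts.
 */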
static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        unsigned long jif;
        u64 user, nice, system, idle, iowait, irq, softirq, steal;
        u64 guest, guest_nice;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;

        user = nice = system = idle = iowait =
                irq = softirq = steal = 0;
        guest = guest_nice = 0;
        getboottime(&boottime);
        jif = boottime.tv_sec;

        for_each_possible_cpu(i) {
                user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle += get_idle_time(i);
                iowait += get_iowait_time(i);
                irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_puts(p, "cpu ");
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
        seq_putc(p, '\n');

        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle = get_idle_time(i);
                iowait = get_iowait_time(i);
                irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
                seq_putc(p, '\n');
        }
        seq_printf(p, "intr %llu", (unsigned long long)sum);

        /* sum again ? it could be updated? */
        for_each_irq_nr(j)
                seq_put_decimal_ull(p, ' ', kstat_irqs(j));

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %lu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long)jif,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
        seq_putc(p, '\n');

        return 0;
}
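
/*
 * stat_open() pre-sizes the seq_file buffer from the CPU and IRQ counts so
 * that a single pass of show_stat() normally fits without the seq_file code
 * having to reallocate a larger buffer and retry.
 */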
static int stat_open(struct inode *inode, struct file *file)
{
        unsigned size = 1024 + 128 * num_possible_cpus();
        char *buf;
        struct seq_file *m;
        int res;

        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;

        /* don't ask for more than the kmalloc() max size */
        if (size > KMALLOC_MAX_SIZE)
                size = KMALLOC_MAX_SIZE;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        res = single_open(file, show_stat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = ksize(buf);
        } else
                kfree(buf);
        return res;
}
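
/* Reads and llseek on /proc/stat go through the standard seq_file helpers. */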
static const struct file_operations proc_stat_operations = {
        .open           = stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
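
/* Register the /proc/stat entry at boot. */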
static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
module_init(proc_stat_init);