stat.c

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif
static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        unsigned long jif;
        cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
        cputime64_t guest;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;
        unsigned int per_irq_sum;

        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
        guest = cputime64_zero;
        getboottime(&boottime);
        jif = boottime.tv_sec;

        for_each_possible_cpu(i) {
                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
                nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
                system = cputime64_add(system, kstat_cpu(i).cpustat.system);
                idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
                idle = cputime64_add(idle, arch_idle_time(i));
                iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
                irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                for_each_irq_nr(j) {
                        sum += kstat_irqs_cpu(j, i);
                }
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                (unsigned long long)cputime64_to_clock_t(user),
                (unsigned long long)cputime64_to_clock_t(nice),
                (unsigned long long)cputime64_to_clock_t(system),
                (unsigned long long)cputime64_to_clock_t(idle),
                (unsigned long long)cputime64_to_clock_t(iowait),
                (unsigned long long)cputime64_to_clock_t(irq),
                (unsigned long long)cputime64_to_clock_t(softirq),
                (unsigned long long)cputime64_to_clock_t(steal),
                (unsigned long long)cputime64_to_clock_t(guest));

        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kstat_cpu(i).cpustat.user;
                nice = kstat_cpu(i).cpustat.nice;
                system = kstat_cpu(i).cpustat.system;
                idle = kstat_cpu(i).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(i));
                iowait = kstat_cpu(i).cpustat.iowait;
                irq = kstat_cpu(i).cpustat.irq;
                softirq = kstat_cpu(i).cpustat.softirq;
                steal = kstat_cpu(i).cpustat.steal;
                guest = kstat_cpu(i).cpustat.guest;
                seq_printf(p,
                        "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                        i,
                        (unsigned long long)cputime64_to_clock_t(user),
                        (unsigned long long)cputime64_to_clock_t(nice),
                        (unsigned long long)cputime64_to_clock_t(system),
                        (unsigned long long)cputime64_to_clock_t(idle),
                        (unsigned long long)cputime64_to_clock_t(iowait),
                        (unsigned long long)cputime64_to_clock_t(irq),
                        (unsigned long long)cputime64_to_clock_t(softirq),
                        (unsigned long long)cputime64_to_clock_t(steal),
                        (unsigned long long)cputime64_to_clock_t(guest));
        }
        seq_printf(p, "intr %llu", (unsigned long long)sum);

        /* sum again ? it could be updated? */
        for_each_irq_nr(j) {
                per_irq_sum = 0;
                for_each_possible_cpu(i)
                        per_irq_sum += kstat_irqs_cpu(j, i);

                seq_printf(p, " %u", per_irq_sum);
        }

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %lu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long)jif,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_printf(p, " %u", per_softirq_sums[i]);
        seq_printf(p, "\n");

        return 0;
}
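
/*
 * For reference, the output assembled by show_stat() above has roughly this
 * shape (the numbers here are made up purely for illustration; real values
 * are monotonically increasing counters in USER_HZ ticks, and the intr and
 * softirq lines continue with one counter per interrupt/softirq):
 *
 *   cpu 4705 356 584 3699176 23 87 10 0 0
 *   cpu0 1393 45 231 925219 8 30 5 0 0
 *   intr 114930548 113199788 3 0 ...
 *   ctxt 1990473
 *   btime 1062191376
 *   processes 2915
 *   procs_running 1
 *   procs_blocked 0
 *   softirq 183433 0 21755 12 39 1137 ...
 */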
static int stat_open(struct inode *inode, struct file *file)
{
        unsigned size = 4096 * (1 + num_possible_cpus() / 32);
        char *buf;
        struct seq_file *m;
        int res;

        /* don't ask for more than the kmalloc() max size, currently 128 KB */
        if (size > 128 * 1024)
                size = 128 * 1024;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        res = single_open(file, show_stat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}
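
/*
 * Worked example of the buffer sizing in stat_open() above (CPU counts are
 * only illustrations of the integer arithmetic):
 *
 *      4 possible CPUs:    4096 * (1 +    4/32) =   4096 bytes
 *     64 possible CPUs:    4096 * (1 +   64/32) =  12288 bytes
 *   1024 possible CPUs:    4096 * (1 + 1024/32) = 135168 bytes,
 *                          clamped to the 128 KB (131072 byte) cap.
 */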
static const struct file_operations proc_stat_operations = {
        .open           = stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
module_init(proc_stat_init);
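
As a rough illustration of how the file registered above is consumed, here is a
minimal user-space sketch (not part of stat.c, and assuming a Linux system with
/proc mounted) that reads the aggregate "cpu" line printed by show_stat() and
converts a few fields from USER_HZ ticks to seconds. The program and file name
readstat.c are hypothetical, shown only for illustration.

/* readstat.c - minimal user-space reader for the aggregate cpu line */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        FILE *f = fopen("/proc/stat", "r");
        char line[512];
        unsigned long long user, nice, system, idle;
        long ticks = sysconf(_SC_CLK_TCK);      /* USER_HZ, the unit of the counters */

        if (!f) {
                perror("fopen /proc/stat");
                return EXIT_FAILURE;
        }

        /* The first line is the summed "cpu ..." line emitted by show_stat(). */
        if (!fgets(line, sizeof(line), f) ||
            sscanf(line, "cpu %llu %llu %llu %llu",
                   &user, &nice, &system, &idle) != 4) {
                fprintf(stderr, "unexpected /proc/stat format\n");
                fclose(f);
                return EXIT_FAILURE;
        }
        fclose(f);

        printf("user: %.2fs nice: %.2fs system: %.2fs idle: %.2fs\n",
               (double)user / ticks, (double)nice / ticks,
               (double)system / ticks, (double)idle / ticks);
        return 0;
}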