/*
 *  linux/fs/proc/proc_misc.c
 *
 *  linux/fs/proc/array.c
 *  Copyright (C) 1992  by Linus Torvalds
 *  based on ideas by Darren Senn
 *
 *  This used to be the part of array.c. See the rest of history and credits
 *  there. I took this into a separate file and switched the thing to generic
 *  proc_file_inode_operations, leaving in array.c only per-process stuff.
 *  Inumbers allocation made dynamic (via create_proc_entry()).  AV, May 1999.
 *
 * Changes:
 * Fulton Green      :  Encapsulated position metric calculations.
 *			<kernel@FultonGreen.com>
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/quicklist.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/utsname.h>
#include <linux/blkdev.h>
#include <linux/hugetlb.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
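
/*
 * Note: the seq_operations referenced below (fragmentation_op,
 * pagetypeinfo_op, zoneinfo_op, vmstat_op, diskstats_op, vmalloc_op, ...)
 * are defined elsewhere in the kernel (largely in the mm and block code);
 * this file only wires them up to files under /proc.
 */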

static int fragmentation_open(struct inode *inode, struct file *file)
{
	(void)inode;
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open = fragmentation_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open = pagetypeinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open = zoneinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open = vmstat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_BLOCK
static int diskstats_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &diskstats_op);
}

static const struct file_operations proc_diskstats_operations = {
	.open = diskstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif

#ifdef CONFIG_MODULES
extern const struct seq_operations modules_op;
static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open = modules_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif

#ifdef CONFIG_SLABINFO
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DEBUG_SLAB_LEAK
extern const struct seq_operations slabstats_op;
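
/*
 * slabstats_open() hands the seq_file a page-sized scratch buffer.  As far
 * as this file is concerned it is opaque; the slab leak-tracking code fills
 * it with (caller, count) pairs, and the first slot records how many such
 * pairs fit in the page -- hence PAGE_SIZE / (2 * sizeof(unsigned long)).
 */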
static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif
#endif

#ifdef CONFIG_MMU
static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	/*
	 * On NUMA builds, give the seq_file an array of per-node counters
	 * so /proc/vmallocinfo can break page counts down by node.  A failed
	 * allocation is not fatal: ptr simply stays NULL and no per-node
	 * buffer is attached.
	 */
	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open = vmalloc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
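
/*
 * show_stat() below generates /proc/stat.  Its output looks roughly like
 * the following (the values here are purely illustrative):
 *
 *	cpu 4705 356 584 3699176 23 23 0 0 0
 *	cpu0 1393 100 138 923025 5 6 0 0 0
 *	...
 *	intr 114930548 113199788 3 0 5 263 0 ...
 *	ctxt 1990473
 *	btime 1062191376
 *	processes 2915
 *	procs_running 1
 *	procs_blocked 0
 *
 * The per-cpu fields are, in order: user, nice, system, idle, iowait, irq,
 * softirq, steal and guest time, all expressed in USER_HZ clock ticks.
 */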
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest;
	u64 sum = 0;
	struct timespec boottime;
	unsigned int per_irq_sum;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		for_each_irq_nr(j)
			sum += kstat_irqs_cpu(j, i);
		sum += arch_irq_stat_cpu(i);
	}
	sum += arch_irq_stat();

	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest));
	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j) {
		per_irq_sum = 0;
		for_each_possible_cpu(i)
			per_irq_sum += kstat_irqs_cpu(j, i);

		seq_printf(p, " %u", per_irq_sum);
	}

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	return 0;
}

static int stat_open(struct inode *inode, struct file *file)
{
	/*
	 * Pre-size the seq_file buffer (4 KB per 32 possible CPUs) so that
	 * show_stat() does not have to be re-run with a larger buffer on
	 * machines with many CPUs.
	 */
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_stat_operations = {
	.open = stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * /proc/interrupts
 */
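/*
 * The iterator below simply walks interrupt numbers 0..nr_irqs; the actual
 * per-IRQ formatting is done by show_interrupts(), which is provided by the
 * architecture/IRQ code rather than by this file.
 */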
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos <= nr_irqs) ? pos : NULL;
}

static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos <= nr_irqs) ? pos : NULL;
}

static void int_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations int_seq_ops = {
	.start = int_seq_start,
	.next  = int_seq_next,
	.stop  = int_seq_stop,
	.show  = show_interrupts
};

static int interrupts_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &int_seq_ops);
}

static const struct file_operations proc_interrupts_operations = {
	.open = interrupts_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_PROC_PAGE_MONITOR
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
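/*
 * Illustrative user-space access pattern (a sketch, not part of this file):
 * the file is indexed by page frame number, one u64 entry per page, so the
 * map count of physical page 'pfn' can be read with something like
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	uint64_t cnt;
 *	pread(fd, &cnt, sizeof(cnt), (off_t)pfn * sizeof(uint64_t));
 *
 * Reads must be u64-aligned and a multiple of 8 bytes, as checked below.
 */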
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
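/*
 * Illustrative user-space access (a sketch, not part of this file): as with
 * /proc/kpagecount the index is the page frame number, so for example the
 * dirty state of page 'pfn' could be inspected with
 *
 *	uint64_t flags;
 *	pread(fd, &flags, sizeof(flags), (off_t)pfn * sizeof(uint64_t));
 *	int dirty = (flags >> KPF_DIRTY) & 1;
 *
 * where fd is an open descriptor for /proc/kpageflags and KPF_DIRTY is the
 * exported bit number defined below.
 */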
/* These macros are used to decouple internal flags from exported ones */

#define KPF_LOCKED     0
#define KPF_ERROR      1
#define KPF_REFERENCED 2
#define KPF_UPTODATE   3
#define KPF_DIRTY      4
#define KPF_LRU        5
#define KPF_ACTIVE     6
#define KPF_SLAB       7
#define KPF_WRITEBACK  8
#define KPF_RECLAIM    9
#define KPF_BUDDY     10

#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
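
/*
 * kpf_copy_bit() copies bit 'srcpos' of 'flags' into bit position 'dstpos'.
 * For example, kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) expands to
 * ((kflags >> PG_dirty) & 1) << KPF_DIRTY, i.e. it lifts the kernel's
 * internal PG_dirty bit into the exported KPF_DIRTY position.
 */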
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 kflags, uflags;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			kflags = 0;
		else
			kflags = ppage->flags;

		uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
			kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
			kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
			kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
			kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
			kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
			kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
			kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
			kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
			kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
			kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);

		if (put_user(uflags, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

struct proc_dir_entry *proc_root_kcore;

void __init proc_misc_init(void)
{
	proc_symlink("mounts", NULL, "self/mounts");

	/* And now for trickier ones */
	proc_create("stat", 0, NULL, &proc_stat_operations);
	proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
#ifdef CONFIG_SLABINFO
	proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL,
		    &proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
#endif
#ifdef CONFIG_MMU
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
#endif
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#ifdef CONFIG_BLOCK
	proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
#endif
#ifdef CONFIG_MODULES
	proc_create("modules", 0, NULL, &proc_modules_operations);
#endif
#ifdef CONFIG_SCHEDSTATS
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
#endif
#ifdef CONFIG_PROC_KCORE
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (proc_root_kcore)
		proc_root_kcore->size =
				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
#endif
#ifdef CONFIG_PROC_VMCORE
	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL,
				  &proc_vmcore_operations);
#endif
}