proc_misc.c
/*
 * linux/fs/proc/proc_misc.c
 *
 * linux/fs/proc/array.c
 * Copyright (C) 1992 by Linus Torvalds
 * based on ideas by Darren Senn
 *
 * This used to be the part of array.c. See the rest of history and credits
 * there. I took this into a separate file and switched the thing to generic
 * proc_file_inode_operations, leaving in array.c only per-process stuff.
 * Inumbers allocation made dynamic (via create_proc_entry()). AV, May 1999.
 *
 * Changes:
 * Fulton Green : Encapsulated position metric calculations.
 *                <kernel@FultonGreen.com>
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/utsname.h>
#include <linux/blkdev.h>
#include <linux/hugetlb.h>
#include <linux/jiffies.h>
#include <linux/sysrq.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

/*
 * Warning: stuff below (imported functions) assumes that its output will fit
 * into one page. For some of those functions it may be wrong. Moreover, we
 * have a way to deal with that gracefully. Right now I used straightforward
 * wrappers, but this needs further analysis wrt potential overflows.
 */
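
/*
 * Common helper for the legacy read_proc handlers below: given the length
 * of the data rendered into 'page', clip it to the caller's (off, count)
 * window and set *eof once everything up to off+count has been produced.
 */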
extern int get_hardware_list(char *);
extern int get_stram_list(char *);
extern int get_exec_domain_list(char *);
extern int get_dma_list(char *);

static int proc_calc_metrics(char *page, char **start, off_t off,
				 int count, int *eof, int len)
{
	if (len <= off+count) *eof = 1;
	*start = page + off;
	len -= off;
	if (len>count) len = count;
	if (len<0) len = 0;
	return len;
}
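
/*
 * /proc/loadavg: the 1-, 5- and 15-minute load averages, the number of
 * currently runnable tasks over the total number of threads, and the most
 * recently allocated PID in the current PID namespace.
 */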
static int loadavg_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int a, b, c;
	int len;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		a = avenrun[0] + (FIXED_1/200);
		b = avenrun[1] + (FIXED_1/200);
		c = avenrun[2] + (FIXED_1/200);
	} while (read_seqretry(&xtime_lock, seq));

	len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		nr_running(), nr_threads,
		task_active_pid_ns(current)->last_pid);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
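
/*
 * /proc/uptime: time since boot and the time accumulated by the idle task
 * (init_task's utime + stime), both in seconds with two decimal places.
 */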
static int uptime_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct timespec uptime;
	struct timespec idle;
	int len;
	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);

	do_posix_clock_monotonic_gettime(&uptime);
	monotonic_to_bootbased(&uptime);
	cputime_to_timespec(idletime, &idle);
	len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
			(unsigned long) uptime.tv_sec,
			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) idle.tv_sec,
			(idle.tv_nsec / (NSEC_PER_SEC / 100)));

	return proc_calc_metrics(page, start, off, count, eof, len);
}
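
/*
 * /proc/meminfo: system-wide summary of RAM, swap, slab, vmalloc and
 * hugepage usage, one tagged value per line.
 */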
static int meminfo_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct sysinfo i;
	int len;
	unsigned long committed;
	unsigned long allowed;
	struct vmalloc_info vmi;
	long cached;

/*
 * display in kilobytes.
 */
#define K(x) ((x) << (PAGE_SHIFT - 10))
	si_meminfo(&i);
	si_swapinfo(&i);
	committed = atomic_read(&vm_committed_space);
	allowed = ((totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100) + total_swap_pages;

	cached = global_page_state(NR_FILE_PAGES) -
			total_swapcache_pages - i.bufferram;
	if (cached < 0)
		cached = 0;

	get_vmalloc_info(&vmi);

	/*
	 * Tagged format, for easy grepping and expansion.
	 */
	len = sprintf(page,
		"MemTotal:     %8lu kB\n"
		"MemFree:      %8lu kB\n"
		"Buffers:      %8lu kB\n"
		"Cached:       %8lu kB\n"
		"SwapCached:   %8lu kB\n"
		"Active:       %8lu kB\n"
		"Inactive:     %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		"HighTotal:    %8lu kB\n"
		"HighFree:     %8lu kB\n"
		"LowTotal:     %8lu kB\n"
		"LowFree:      %8lu kB\n"
#endif
		"SwapTotal:    %8lu kB\n"
		"SwapFree:     %8lu kB\n"
		"Dirty:        %8lu kB\n"
		"Writeback:    %8lu kB\n"
		"AnonPages:    %8lu kB\n"
		"Mapped:       %8lu kB\n"
		"Slab:         %8lu kB\n"
		"SReclaimable: %8lu kB\n"
		"SUnreclaim:   %8lu kB\n"
		"PageTables:   %8lu kB\n"
		"NFS_Unstable: %8lu kB\n"
		"Bounce:       %8lu kB\n"
		"CommitLimit:  %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
		"VmallocUsed:  %8lu kB\n"
		"VmallocChunk: %8lu kB\n",
		K(i.totalram),
		K(i.freeram),
		K(i.bufferram),
		K(cached),
		K(total_swapcache_pages),
		K(global_page_state(NR_ACTIVE)),
		K(global_page_state(NR_INACTIVE)),
#ifdef CONFIG_HIGHMEM
		K(i.totalhigh),
		K(i.freehigh),
		K(i.totalram-i.totalhigh),
		K(i.freeram-i.freehigh),
#endif
		K(i.totalswap),
		K(i.freeswap),
		K(global_page_state(NR_FILE_DIRTY)),
		K(global_page_state(NR_WRITEBACK)),
		K(global_page_state(NR_ANON_PAGES)),
		K(global_page_state(NR_FILE_MAPPED)),
		K(global_page_state(NR_SLAB_RECLAIMABLE) +
				global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_SLAB_RECLAIMABLE)),
		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_PAGETABLE)),
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(allowed),
		K(committed),
		(unsigned long)VMALLOC_TOTAL >> 10,
		vmi.used >> 10,
		vmi.largest_chunk >> 10
		);

	len += hugetlb_report_meminfo(page + len);

	return proc_calc_metrics(page, start, off, count, eof, len);
#undef K
}
extern const struct seq_operations fragmentation_op;
static int fragmentation_open(struct inode *inode, struct file *file)
{
	(void)inode;
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open = fragmentation_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern const struct seq_operations pagetypeinfo_op;
static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open = pagetypeinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

extern const struct seq_operations zoneinfo_op;
static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open = zoneinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static int version_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len;

	len = snprintf(page, PAGE_SIZE, linux_proc_banner,
		utsname()->sysname,
		utsname()->release,
		utsname()->version);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
extern const struct seq_operations cpuinfo_op;
static int cpuinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &cpuinfo_op);
}

static const struct file_operations proc_cpuinfo_operations = {
	.open = cpuinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
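
/*
 * /proc/devices: registered character and block device majors. The
 * iterator walks the character major hash first, then (if CONFIG_BLOCK)
 * the block major hash.
 */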
static int devinfo_show(struct seq_file *f, void *v)
{
	int i = *(loff_t *) v;

	if (i < CHRDEV_MAJOR_HASH_SIZE) {
		if (i == 0)
			seq_printf(f, "Character devices:\n");
		chrdev_show(f, i);
	}
#ifdef CONFIG_BLOCK
	else {
		i -= CHRDEV_MAJOR_HASH_SIZE;
		if (i == 0)
			seq_printf(f, "\nBlock devices:\n");
		blkdev_show(f, i);
	}
#endif
	return 0;
}

static void *devinfo_start(struct seq_file *f, loff_t *pos)
{
	if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
		return pos;
	return NULL;
}

static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
		return NULL;
	return pos;
}

static void devinfo_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations devinfo_ops = {
	.start = devinfo_start,
	.next = devinfo_next,
	.stop = devinfo_stop,
	.show = devinfo_show
};

static int devinfo_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &devinfo_ops);
}

static const struct file_operations proc_devinfo_operations = {
	.open = devinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
extern const struct seq_operations vmstat_op;
static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open = vmstat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_PROC_HARDWARE
static int hardware_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_hardware_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
#endif

#ifdef CONFIG_STRAM_PROC
static int stram_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_stram_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
#endif
#ifdef CONFIG_BLOCK
static int partitions_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &partitions_op);
}

static const struct file_operations proc_partitions_operations = {
	.open = partitions_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int diskstats_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &diskstats_op);
}

static const struct file_operations proc_diskstats_operations = {
	.open = diskstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif
#ifdef CONFIG_MODULES
extern const struct seq_operations modules_op;
static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open = modules_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif
#ifdef CONFIG_SLABINFO
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DEBUG_SLAB_LEAK
extern const struct seq_operations slabstats_op;
static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif
#endif
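
/*
 * /proc/stat: aggregated and per-CPU time counters (in clock ticks),
 * interrupt counts, context switches, boot time, fork count and the
 * number of running and blocked processes.
 */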
static int show_stat(struct seq_file *p, void *v)
{
	int i;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest;
	u64 sum = 0;
	struct timespec boottime;
	unsigned int *per_irq_sum;

	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
	if (!per_irq_sum)
		return -ENOMEM;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		int j;

		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		for (j = 0; j < NR_IRQS; j++) {
			unsigned int temp = kstat_cpu(i).irqs[j];
			sum += temp;
			per_irq_sum[j] += temp;
		}
	}

	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest));
	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	for (i = 0; i < NR_IRQS; i++)
		seq_printf(p, " %u", per_irq_sum[i]);

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	kfree(per_irq_sum);
	return 0;
}
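
/*
 * Preallocate a seq_file buffer large enough for all per-CPU lines so
 * show_stat() can complete in a single pass instead of seq_read()
 * retrying with progressively larger buffers.
 */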
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_stat_operations = {
	.open = stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * /proc/interrupts
 */
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos <= NR_IRQS) ? pos : NULL;
}

static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos > NR_IRQS)
		return NULL;
	return pos;
}

static void int_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static const struct seq_operations int_seq_ops = {
	.start = int_seq_start,
	.next = int_seq_next,
	.stop = int_seq_stop,
	.show = show_interrupts
};

static int interrupts_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &int_seq_ops);
}

static const struct file_operations proc_interrupts_operations = {
	.open = interrupts_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
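
/* /proc/filesystems: every filesystem type currently registered. */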
static int filesystems_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_filesystem_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

static int cmdline_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len;

	len = sprintf(page, "%s\n", saved_command_line);
	return proc_calc_metrics(page, start, off, count, eof, len);
}

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &locks_seq_operations);
}

static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int execdomains_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int len = get_exec_domain_list(page);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
#ifdef CONFIG_MAGIC_SYSRQ
/*
 * writing 'C' to /proc/sysrq-trigger is like sysrq-C
 */
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	if (count) {
		char c;

		if (get_user(c, buf))
			return -EFAULT;
		__handle_sysrq(c, NULL, 0);
	}
	return count;
}

static const struct file_operations proc_sysrq_trigger_operations = {
	.write = write_sysrq_trigger,
};
#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EIO;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			pcount = 0;
		else
			pcount = atomic_read(&ppage->_count);

		if (put_user(pcount, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

/* These macros are used to decouple internal flags from exported ones */

#define KPF_LOCKED 0
#define KPF_ERROR 1
#define KPF_REFERENCED 2
#define KPF_UPTODATE 3
#define KPF_DIRTY 4
#define KPF_LRU 5
#define KPF_ACTIVE 6
#define KPF_SLAB 7
#define KPF_WRITEBACK 8
#define KPF_RECLAIM 9
#define KPF_BUDDY 10

#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 kflags, uflags;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EIO;

	while (count > 0) {
		ppage = NULL;
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		pfn++;
		if (!ppage)
			kflags = 0;
		else
			kflags = ppage->flags;

		uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
			kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
			kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
			kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
			kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
			kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
			kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
			kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
			kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
			kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
			kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);

		if (put_user(uflags, out++)) {
			ret = -EFAULT;
			break;
		}

		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

struct proc_dir_entry *proc_root_kcore;
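
/*
 * Helper to register a /proc entry that is serviced entirely by the given
 * file_operations (used for the seq_file based files created below).
 */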
void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
{
	struct proc_dir_entry *entry;
	entry = create_proc_entry(name, mode, NULL);
	if (entry)
		entry->proc_fops = f;
}
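
/*
 * Called at boot to create the top-level /proc files: the simple read_proc
 * entries from the table below, followed by the seq_file and special
 * purpose entries.
 */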
void __init proc_misc_init(void)
{
	static struct {
		char *name;
		int (*read_proc)(char*,char**,off_t,int,int*,void*);
	} *p, simple_ones[] = {
		{"loadavg", loadavg_read_proc},
		{"uptime", uptime_read_proc},
		{"meminfo", meminfo_read_proc},
		{"version", version_read_proc},
#ifdef CONFIG_PROC_HARDWARE
		{"hardware", hardware_read_proc},
#endif
#ifdef CONFIG_STRAM_PROC
		{"stram", stram_read_proc},
#endif
		{"filesystems", filesystems_read_proc},
		{"cmdline", cmdline_read_proc},
		{"execdomains", execdomains_read_proc},
		{NULL,}
	};
	for (p = simple_ones; p->name; p++)
		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);

	proc_symlink("mounts", NULL, "self/mounts");

	/* And now for trickier ones */
#ifdef CONFIG_PRINTK
	{
		struct proc_dir_entry *entry;
		entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
		if (entry)
			entry->proc_fops = &proc_kmsg_operations;
	}
#endif
	create_seq_entry("locks", 0, &proc_locks_operations);
	create_seq_entry("devices", 0, &proc_devinfo_operations);
	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("partitions", 0, &proc_partitions_operations);
#endif
	create_seq_entry("stat", 0, &proc_stat_operations);
	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
#ifdef CONFIG_SLABINFO
	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
	create_seq_entry("slab_allocators", 0, &proc_slabstats_operations);
#endif
#endif
	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
	create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops);
	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
	create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
#endif
#ifdef CONFIG_MODULES
	create_seq_entry("modules", 0, &proc_modules_operations);
#endif
#ifdef CONFIG_SCHEDSTATS
	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
#endif
#ifdef CONFIG_PROC_KCORE
	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
	if (proc_root_kcore) {
		proc_root_kcore->proc_fops = &proc_kcore_operations;
		proc_root_kcore->size =
				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
	}
#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
	create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
	create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
#endif
#ifdef CONFIG_PROC_VMCORE
	proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
	if (proc_vmcore)
		proc_vmcore->proc_fops = &proc_vmcore_operations;
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	{
		struct proc_dir_entry *entry;
		entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
		if (entry)
			entry->proc_fops = &proc_sysrq_trigger_operations;
	}
#endif
}