vmstat.c
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_cpu_mask(cpu, *cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        get_online_cpus();
        sum_vm_events(ret, &cpu_online_map);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
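
/*
 * Example (illustrative, not part of this file): a caller can snapshot
 * the event counters into a local array and pick out single items by
 * their enum vm_event_item index.  PGFAULT and NR_VM_EVENT_ITEMS come
 * from <linux/vmstat.h>; the printk is purely hypothetical.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk(KERN_DEBUG "page faults so far: %lu\n", events[PGFAULT]);
 *
 * Because the per-cpu counters keep ticking, two back-to-back snapshots
 * may disagree; treat the values as approximate.
 */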

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
         * ------------------------------------------------------------------
         * 8		1		1	0.9-1 GB	4
         * 16		2		2	0.9-1 GB	4
         * 20		2		2	1-2 GB		5
         * 24		2		2	2-4 GB		6
         * 28		2		2	4-8 GB		7
         * 32		2		2	8-16 GB		8
         * 4		2		2	<128M		1
         * 30		4		3	2-4 GB		5
         * 48		4		3	8-16 GB		8
         * 32		8		4	1-2 GB		4
         * 32		8		4	0.9-1 GB	4
         * 10		16		5	<128M		1
         * 40		16		5	900M		4
         * 70		64		7	2-4 GB		5
         * 84		64		7	4-8 GB		6
         * 108		512		9	4-8 GB		6
         * 125		1024		10	8-16 GB		8
         * 125		1024		10	16-32 GB	9
         */

        mem = zone->present_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}
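
/*
 * Worked example (the numbers follow directly from the code above):
 * with 4 CPUs online, fls(4) = 3; a 2 GB zone gives
 * mem = 2048 MB / 128 MB = 16, so fls(16) = 5 and
 *
 *	threshold = 2 * 3 * (1 + 5) = 36
 *
 * well under the cap of 125.  Each per-cpu counter may therefore drift
 * up to 36 either way before it is folded into the zone and global
 * counters.
 */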

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_zone(zone) {

                if (!zone->present_pages)
                        continue;

                threshold = calculate_threshold(zone);

                for_each_online_cpu(cpu)
                        zone_pcp(zone, cpu)->stat_threshold = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;
        long x;

        x = delta + *p;

        if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
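
/*
 * Example (illustrative only): a pagecache path that has just added
 * "nr" pages to a zone could account for them with either variant.
 * NR_FILE_PAGES is a real zone_stat_item; the surrounding caller is
 * hypothetical.  With interrupt state unknown:
 *
 *	mod_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * or, inside a section that already runs with interrupts off:
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * The __ variant skips the local_irq_save()/restore() pair and must
 * only be used when interrupts are already disabled.
 */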

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)++;

        if (unlikely(*p > pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p + overstep, zone, item);
                *p = -overstep;
        }
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)--;

        if (unlikely(*p < -pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p - overstep, zone, item);
                *p = overstep;
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
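
/*
 * Worked example of the "overstep" trick above: with stat_threshold =
 * 32, a counter that keeps being incremented crosses the threshold at
 * *p = 33.  Instead of folding 33 and resetting to 0, the code folds
 * 33 + 16 = 49 into the zone counter and leaves *p = -16, so roughly
 * 49 further increments pass before the next fold instead of 33.  For
 * counters that mostly move in one direction this noticeably reduces
 * the number of expensive fold operations, at the cost of a slightly
 * larger transient error.
 */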

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. This could cause remote node cacheline
 * bouncing and should only be done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_zone(zone) {
                struct per_cpu_pageset *p;

                if (!populated_zone(zone))
                        continue;

                p = zone_pcp(zone, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                unsigned long flags;
                                int v;

                                local_irq_save(flags);
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                        }
                cond_resched();
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor
                 *
                 * Check if there are pages remaining in this pageset
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || !p->pcp.count)
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp.count)
                        drain_zone_pages(zone, &p->pcp);
#endif
        }

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (global_diff[i])
                        atomic_long_add(global_diff[i], &vm_stat[i]);
}
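
/*
 * Timing sketch (follows from the code above plus the vmstat_update
 * worker near the end of this file): the worker calls
 * refresh_cpu_vm_stats() roughly once per sysctl_stat_interval (HZ,
 * i.e. about one second, by default).  Whenever a pass folds a nonzero
 * counter delta for a remote pageset, p->expire is re-armed to 3, so
 * a remote pageset must sit untouched through three consecutive
 * refreshes - about three seconds - before its cached pages are given
 * back via drain_zone_pages().
 */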
#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone the allocator would have preferred
 * z              = the zone from which the allocation actually occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
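
/*
 * Worked example: a task running on node 0 asks for memory, the
 * preferred zone is on node 0, but the page actually comes from a zone
 * on node 1.  Then the node-1 zone gets NUMA_MISS, the node-0 zone
 * gets NUMA_FOREIGN, and because the allocating CPU's node (0) differs
 * from the supplying zone's node (1), the node-1 zone also gets
 * NUMA_OTHER.  Had the page come from node 0, it would have counted as
 * NUMA_HIT plus NUMA_LOCAL instead.
 */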
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
        "Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                mtype = get_pageblock_migratetype(page);

                count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* check memoryless node */
        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
                return 0;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};

const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
};
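
/*
 * These tables are not used directly in this file; the proc side (in
 * this kernel generation, fs/proc/proc_misc.c - details vary between
 * versions) wires them to /proc/buddyinfo and /proc/pagetypeinfo with
 * boilerplate along these lines (a sketch, not copied from that file):
 *
 *	static int fragmentation_open(struct inode *inode, struct file *file)
 *	{
 *		return seq_open(file, &fragmentation_op);
 *	}
 *
 *	static const struct file_operations fragmentation_file_operations = {
 *		.open		= fragmentation_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */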

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive",
        "nr_active",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
        "nr_writeback_temp",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
#endif
#endif
};
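
/*
 * Note on ordering (implicit in the code, spelled out here): the first
 * NR_VM_ZONE_STAT_ITEMS strings must stay in the same order as enum
 * zone_stat_item, and the remaining strings in the same order as enum
 * vm_event_item, because vmstat_start() below fills its buffer with
 * global_page_state(i) followed by all_vm_events(), and vmstat_show()
 * pairs names with values purely by array offset.
 */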

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
                   "\n        scanned  %lu (a: %lu i: %lu)"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   zone->pages_min,
                   zone->pages_low,
                   zone->pages_high,
                   zone->pages_scanned,
                   zone->nr_scan_active, zone->nr_scan_inactive,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n        protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n  pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;

                pageset = zone_pcp(zone, i);
                seq_printf(m,
                           "\n    cpu: %i"
                           "\n              count: %i"
                           "\n              high:  %i"
                           "\n              batch: %i",
                           i,
                           pageset->pcp.count,
                           pageset->pcp.high,
                           pageset->pcp.batch);
#ifdef CONFIG_SMP
                seq_printf(m, "\n  vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n  all_unreclaimable: %u"
                   "\n  prev_priority:     %i"
                   "\n  start_pfn:         %lu",
                   zone_is_all_unreclaimable(zone),
                   zone->prev_priority,
                   zone->zone_start_pfn);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes: two 512-byte sectors per KiB */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};
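
/*
 * Sample /proc/vmstat output produced by the vmstat_op table above
 * (the names are real entries of vmstat_text; the values are invented
 * for illustration):
 *
 *	nr_free_pages 81925
 *	nr_mapped 13002
 *	pgpgin 482728
 *	pgfault 10976341
 *
 * One "name value" line per entry of vmstat_text, in array order.
 */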
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
        schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}
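
/*
 * The initial delay of HZ + cpu (rather than a flat HZ) staggers the
 * per-cpu workers by one tick per cpu, so that on large machines they
 * do not all expire on the same jiffy and contend with one another.
 */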

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
        return 0;
}
module_init(setup_vmstat)

#endif