mm_init.c

/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
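/* Verbosity of memory-init debug output; set via the mminit_loglevel= early parameter */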
int mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				printk(KERN_CONT "%d:%s ",
					zone->node, zone->name);
#else
				printk(KERN_CONT "0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			printk(KERN_CONT "\n");
		}
	}
}
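
/*
 * Report how the section, node, zone and last_nid fields are packed into
 * page->flags, and BUG if the compile-time shifts and masks are
 * inconsistent or overlapping.
 */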
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastnid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_NID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastnid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_NID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastnid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_NID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last nid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
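
/*
 * Confirm that the node, zone and pfn encoded in a struct page decode back
 * to the values the memmap initialisation code expects.
 */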
void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
			unsigned long nid, unsigned long pfn)
{
	BUG_ON(page_to_nid(page) != nid);
	BUG_ON(page_zonenum(page) != zone);
	BUG_ON(page_to_pfn(page) != pfn);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
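/*
 * Per-cpu batch size for the vm_committed_as counter, sized from total
 * memory and CPU count at boot and recomputed on memory hotplug.
 */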
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
	memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
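
/* Recompute the batch size whenever memory is onlined or offlined. */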
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif
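
/* Create the /sys/kernel/mm kobject that mm sysfs entries hang off. */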
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
__initcall(mm_sysfs_init);