/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);

static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);

static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/*
		 * We want to keep adding the rest of the
		 * sections if the first ones already exist.
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
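
/*
 * Illustrative sketch (hypothetical arch-side code, not built here): how an
 * architecture's arch_add_memory() might use __add_pages().  The arch picks
 * the destination zone, makes sure the new physical range is mapped, and then
 * hands the pfn range over; __add_pages() walks it section by section.
 * init_new_range() stands in for whatever mapping setup the arch actually
 * performs, and the choice of ZONE_NORMAL is only an example.
 */
#if 0
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_new_range(start, start + size);	/* arch-specific mapping setup */

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif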
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone, so the
	 * zonelist must be updated after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while (find_next_system_ram(&res) >= 0) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
			   ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* this region's pages are not onlined yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	return 0;
}
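
/*
 * For context (the callers live outside this file): online_pages() is
 * normally reached from the memory sysfs driver when userspace writes
 * "online" to /sys/devices/system/memory/memoryN/state.  That path onlines
 * one section-sized block at a time, roughly as sketched below; the exact
 * call site is an assumption here, not part of this file.
 */
#if 0
	/* e.g. from the sysfs "state" store handler, per memory section: */
	ret = online_pages(section_nr_to_pfn(section_nr), PAGES_PER_SECTION);
#endif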
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages yet */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* add this memory to iomem resource */
static void register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
	}
}
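
/*
 * On success the hot-added range becomes visible in /proc/iomem alongside
 * boot memory, e.g. (addresses illustrative):
 *
 *   100000000-13fffffff : System RAM
 */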
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	int ret;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created,
		 * CPUs on the node can't be hot-added.  There is no
		 * rollback path now, so catch the failure with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	/* register this memory as resource */
	register_memory_resource(start, size);

	return ret;
error:
	/* roll back pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
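
/*
 * Illustrative caller sketch (hypothetical, not built here): a hot-add
 * driver, e.g. modelled loosely on ACPI memory hotplug, ends up calling
 * add_memory() with the node id and the physical range reported by
 * firmware.  example_hotadd_device() and its parameters are assumptions
 * made for the sake of the example.
 */
#if 0
static int example_hotadd_device(int nid, u64 start, u64 length)
{
	int ret;

	ret = add_memory(nid, start, length);
	if (ret)
		printk(KERN_ERR "hot-add of range %llx - %llx failed (%d)\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + length - 1), ret);
	return ret;
}
#endif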