
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>

#include <asm/tlbflush.h>
/* add this memory to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;

        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);
        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM;
        if (request_resource(&iomem_resource, res) < 0) {
                printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
                       (unsigned long long)res->start,
                       (unsigned long long)res->end);
                kfree(res);
                res = NULL;
        }
        return res;
}
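
/*
 * Illustration (not part of the original file): after a successful
 * hot-add of 128MB at 0x40000000, the new range shows up under
 * /proc/iomem roughly as
 *
 *      40000000-47ffffff : System RAM
 *
 * request_resource() fails when the range overlaps an already
 * registered resource, which is how a duplicate hot-add of the same
 * region gets rejected before any page structures are touched.
 */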
static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;

        zone_type = zone - pgdat->node_zones;
        if (!populated_zone(zone)) {
                int ret = 0;

                ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
                if (ret < 0)
                        return ret;
        }
        memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
        zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
        return 0;
}
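
/*
 * Note on the pointer arithmetic above: node_zones[] is an array
 * embedded in struct pglist_data, so "zone - pgdat->node_zones" yields
 * the zone's index.  An illustrative equivalent (ZONE_NORMAL is just an
 * example value, not what this file mandates):
 *
 *      struct zone *zone = &pgdat->node_zones[ZONE_NORMAL];
 *      int zone_type = zone - pgdat->node_zones;  (evaluates to ZONE_NORMAL)
 */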
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);
        if (ret < 0)
                return ret;

        return register_new_memory(__pfn_to_section(phys_start_pfn));
}
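
/*
 * The order above matters: sparse_add_one_section() allocates the
 * section's mem_map, __add_zone() initializes the new struct pages and
 * records the section in the zone table, and register_new_memory()
 * finally exposes the section as a /sys/devices/system/memory/memoryX
 * block so that user space can online it later.
 */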
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;

        /* when initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the ioresource
                 * collision check; see add_memory() ->
                 * register_memory_resource().  A warning is printed
                 * if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
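
/*
 * A minimal sketch of an arch-side caller (illustrative only -- the
 * function below is not part of this file, and the choice of
 * ZONE_NORMAL is an assumption; each arch picks its own target zone):
 *
 *      int arch_add_memory(int nid, u64 start, u64 size)
 *      {
 *              struct pglist_data *pgdat = NODE_DATA(nid);
 *              struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *
 *              return __add_pages(zone, start >> PAGE_SHIFT,
 *                                 size >> PAGE_SHIFT);
 *      }
 */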
static void grow_zone_span(struct zone *zone,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}
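
/*
 * Worked example with hypothetical numbers: a zone spanning pfns
 * 0x10000-0x1ffff (zone_start_pfn = 0x10000, spanned_pages = 0x10000)
 * that grows by pfns 0x30000-0x307ff ends up with
 *
 *      spanned_pages = max(0x20000, 0x30800) - 0x10000 = 0x20800
 *
 * so the span may now contain holes; spanned_pages only bounds the
 * range, while present_pages keeps counting pages that really exist.
 */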
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long i;
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct resource res;
        u64 section_end;
        unsigned long start_pfn;
        struct zone *zone;
        int need_zonelists_rebuild = 0;

        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_sem.
         */
        zone = page_zone(pfn_to_page(pfn));
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, pfn, pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        /*
         * If this zone is not populated, it is not in the zonelist,
         * which means the page allocator ignores it.  So the
         * zonelists must be rebuilt after onlining.
         */
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;

        res.start = (u64)pfn << PAGE_SHIFT;
        res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
        res.flags = IORESOURCE_MEM;     /* we just need system ram */
        section_end = res.end;

        while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
                start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
                nr_pages = (unsigned long)
                           ((res.end + 1 - res.start) >> PAGE_SHIFT);

                if (PageReserved(pfn_to_page(start_pfn))) {
                        /* this region's pages are not onlined yet */
                        for (i = 0; i < nr_pages; i++) {
                                struct page *page = pfn_to_page(start_pfn + i);
                                online_page(page);
                                onlined_pages++;
                        }
                }
                res.start = res.end + 1;
                res.end = section_end;
        }
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;

        setup_per_zone_pages_min();

        if (need_zonelists_rebuild)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
        return 0;
}
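
/*
 * online_pages() is normally reached from the memory sysfs layer; an
 * illustrative session (the block number 16 is made up):
 *
 *      # echo online > /sys/devices/system/memory/memory16/state
 *
 * drivers/base/memory.c translates that state change into a call here
 * with the section's first pfn and PAGES_PER_SECTION.
 */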
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init the node's zones as empty zones; we don't have any present pages */
        free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

        return pgdat;
}
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
}
int add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (!res)
                return -EEXIST;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat) {
                        /* release the iomem resource registered above */
                        ret = -ENOMEM;
                        goto error;
                }
                new_pgdat = 1;
                ret = kswapd_run(nid);
                if (ret)
                        goto error;
        }

        /* call the arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);
        if (ret < 0)
                goto error;

        /* we online the node here; we can't roll back from this point */
        node_set_online(nid);

        cpuset_track_online_nodes();

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file for the new node can't be created,
                 * CPUs on the node can't be hot-added.  There is no way
                 * to roll back at this point, so catch it with BUG_ON(),
                 * reluctantly.
                 */
                BUG_ON(ret);
        }

        return ret;
error:
        /* roll back the pgdat allocation and the rest */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
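
/*
 * A sketch of a caller (illustrative; the ACPI memory-hotplug driver
 * does essentially this when firmware reports new memory -- the node id
 * and addresses below are made up):
 *
 *      u64 start = 0x40000000ULL, size = 0x8000000ULL;
 *      int err = add_memory(nid, start, size);
 *
 * add_memory() only makes the range known to the kernel; the new
 * sections still have to be onlined (see online_pages() above) before
 * the page allocator will hand out their pages.
 */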