/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(); if required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */
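/*
 * Illustrative sketch (not part of this file): a driver that wants to
 * intercept page onlining -- the Xen balloon driver is one in-tree user --
 * would register and later restore its callback roughly like this; the
 * callback name below is hypothetical.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		// keep the page for the driver instead of freeing it
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */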
static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
        mutex_lock(&mem_hotplug_mutex);

        /* for exclusive hibernation if CONFIG_HIBERNATION=y */
        lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
        unlock_system_sleep();
        mutex_unlock(&mem_hotplug_mutex);
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;

        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void get_page_bootmem(unsigned long info, struct page *page,
                             unsigned long type)
{
        page->lru.next = (struct list_head *) type;
        SetPagePrivate(page);
        set_page_private(page, info);
        atomic_inc(&page->_count);
}

/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->lru.next;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (atomic_dec_return(&page->_count) == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                __free_pages_bootmem(page, 0);
        }
}

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
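/*
 * Worked example (illustrative, numbers assume x86_64 defaults): with a
 * 128MB section, PAGES_PER_SECTION is 32768; at roughly 64 bytes per
 * struct page the section memmap is about 2MB, so the loop above takes a
 * bootmem reference on about 512 memmap pages per section.
 */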
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;
        struct zone *zone;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        zone = &pgdat->node_zones[0];
        for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
                if (zone->wait_table) {
                        nr_pages = zone->wait_table_hash_nr_entries
                                * sizeof(wait_queue_head_t);
                        nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
                        page = virt_to_page(zone->wait_table);

                        for (i = 0; i < nr_pages; i++, page++)
                                get_page_bootmem(node, page, NODE_INFO);
                }
        }

        pfn = pgdat->node_start_pfn;
        end_pfn = pfn + pgdat->node_spanned_pages;

        /* register_section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN.  To avoid registering a pfn against
                 * multiple nodes we check that this pfn does not already
                 * reside in some other node.
                 */
                if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        zone_span_writelock(zone);

        if (end_pfn - start_pfn) {
                zone->zone_start_pfn = start_pfn;
                zone->spanned_pages = end_pfn - start_pfn;
        } else {
                /*
                 * Keep this consistent with free_area_init_core():
                 * if spanned_pages == 0, then keep zone_start_pfn == 0.
                 */
                zone->zone_start_pfn = 0;
                zone->spanned_pages = 0;
        }

        zone_span_writeunlock(zone);
}
static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        enum zone_type zid = zone_idx(zone);
        int nid = zone->zone_pgdat->node_id;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z1_start_pfn;

        if (!z1->wait_table) {
                ret = init_currently_empty_zone(z1, start_pfn,
                        end_pfn - start_pfn, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are higher than @z2 */
        if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
                goto out_fail;
        /* the range to move must be at the leftmost of @z2 */
        if (start_pfn > z2->zone_start_pfn)
                goto out_fail;
        /* must include/overlap */
        if (end_pfn <= z2->zone_start_pfn)
                goto out_fail;

        /* use start_pfn for z1's start_pfn if z1 is empty */
        if (z1->spanned_pages)
                z1_start_pfn = z1->zone_start_pfn;
        else
                z1_start_pfn = start_pfn;

        resize_zone(z1, z1_start_pfn, end_pfn);
        resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z1, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}
static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z2_end_pfn;

        if (!z2->wait_table) {
                ret = init_currently_empty_zone(z2, start_pfn,
                        end_pfn - start_pfn, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are lower than @z1 */
        if (z1->zone_start_pfn > start_pfn)
                goto out_fail;
        /* the range to move must be at the rightmost of @z1 */
        if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
                goto out_fail;
        /* must include/overlap */
        if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
                goto out_fail;

        /* use end_pfn for z2's end_pfn if z2 is empty */
        if (z2->spanned_pages)
                z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
        else
                z2_end_pfn = end_pfn;

        resize_zone(z1, z1->zone_start_pfn, start_pfn);
        resize_zone(z2, start_pfn, z2_end_pfn);

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z2, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}
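/*
 * Illustrative summary of the two helpers above, assuming @z1 sits directly
 * below @z2 in the zone array (e.g. ZONE_NORMAL and ZONE_MOVABLE):
 * move_pfn_range_left() hands [start_pfn, end_pfn) from the head of @z2 to
 * @z1, growing @z1 up to end_pfn and making @z2 start at end_pfn;
 * move_pfn_range_right() is the mirror image, handing the range from the
 * tail of @z1 to the head of @z2.
 */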
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                            unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
        unsigned long flags;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;

                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
                        phys_start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
                                   unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);
        if (ret < 0)
                return ret;

        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        /*
         * XXX: Freeing the memmap with vmemmap is not implemented yet.
         * This should be removed later.
         */
        return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        unsigned long flags;
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        pgdat_resize_lock(pgdat, &flags);
        sparse_remove_one_section(zone, ms);
        pgdat_resize_unlock(pgdat, &flags);
        return 0;
}
#endif
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
                      unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;

        /* when initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the iomem resource
                 * collision check; see add_memory() =>
                 * register_memory_resource().  A warning will be printed
                 * if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
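/*
 * Minimal caller sketch (hypothetical, loosely modelled on the x86_64 path,
 * not part of this file): arch_add_memory() picks the target zone and then
 * hands the pfn range to __add_pages() one section at a time.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		// set up kernel mappings for the new range first, then:
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */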
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                   unsigned long nr_pages)
{
        unsigned long i, ret = 0;
        int sections_to_remove;

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
                ret = __remove_section(zone, __pfn_to_section(pfn));
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        if (pfn >= num_physpages)
                num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        totalram_pages++;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                              void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;

        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}
/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);
        enum zone_type zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
         * which have 0...ZONE_NORMAL, so set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
         * which have 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_HIGH_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * If the memory to be onlined is in a zone of 0...zone_last, and
         * the zones of 0...zone_last don't have memory before onlining, we
         * will need to set the node in node_states[N_NORMAL_MEMORY] after
         * the memory is onlined.
         */
        if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
        else
                arg->status_change_nid_normal = -1;

        /*
         * If the node doesn't have memory before onlining, we will need to
         * set the node in node_states[N_HIGH_MEMORY] after the memory
         * is onlined.
         */
        if (!node_state(nid, N_HIGH_MEMORY))
                arg->status_change_nid = nid;
        else
                arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        node_set_state(node, N_HIGH_MEMORY);
}
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        lock_memory_hotplug();
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_mutex.
         */
        zone = page_zone(pfn_to_page(pfn));

        if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
                if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
                        unlock_memory_hotplug();
                        return -1;
                }
        }
        if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
                if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
                        unlock_memory_hotplug();
                        return -1;
                }
        }

        /* The code above may have changed the zone of the pfn range */
        zone = page_zone(pfn_to_page(pfn));

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        nid = page_to_nid(pfn_to_page(pfn));

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }
        /*
         * If this zone is not populated, then it is not in zonelist.
         * This means the page allocator ignores this zone.
         * So, zonelist must be updated after online.
         */
        mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                build_all_zonelists(NULL, zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                mutex_unlock(&zonelists_mutex);
                printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
                       (unsigned long long) pfn << PAGE_SHIFT,
                       (((unsigned long long) pfn + nr_pages)
                            << PAGE_SHIFT) - 1);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }

        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
        if (onlined_pages) {
                node_states_set_node(zone_to_nid(zone), &arg);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL, NULL);
                else
                        zone_pcp_update(zone);
        }

        mutex_unlock(&zonelists_mutex);

        init_per_zone_wmark_min();

        if (onlined_pages)
                kswapd_run(zone_to_nid(zone));

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);

        unlock_memory_hotplug();

        return 0;
}
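/*
 * Illustrative user-space view (assuming the usual sysfs memory-block
 * interface): onlining is normally triggered per memory block, e.g.
 *
 *	# echo online > /sys/devices/system/memory/memory32/state
 *	# echo online_kernel > /sys/devices/system/memory/memory32/state
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * The last two map to ONLINE_KERNEL/ONLINE_MOVABLE above and only succeed
 * when the block sits at the boundary between ZONE_MOVABLE and the zone
 * below it, as the move_pfn_range_*() checks enforce.
 */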
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages. */
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);

        /*
         * The node we allocated has no zone fallback lists. To avoid
         * accessing a not-yet-initialized zonelist, build one here.
         */
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
        return;
}

/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
        pg_data_t *pgdat;
        int ret;

        lock_memory_hotplug();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);

out:
        unlock_memory_hotplug();
        return ret;
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        lock_memory_hotplug();

        res = register_memory_resource(start, size);
        ret = -EEXIST;
        if (!res)
                goto out;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);
        if (ret < 0)
                goto error;

        /* we online the node here. we can't roll back from here. */
        node_set_online(nid);

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs entry for the new node can't be created,
                 * CPUs on that node can't be hot-added either. There is
                 * no rollback path at this point, so check with BUG_ON()
                 * to catch it, reluctantly.
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

out:
        unlock_memory_hotplug();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
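/*
 * Illustrative caller sketch (hypothetical, loosely modelled on the ACPI
 * memory-hotplug driver, not part of this file): when firmware reports a
 * new memory device, the driver resolves the node and hands the physical
 * range to add_memory(); info->start_addr and info->length stand in for
 * whatever the firmware interface provides.
 *
 *	nid = memory_add_physaddr_to_nid(info->start_addr);
 *	result = add_memory(nid, info->start_addr, info->length);
 *	if (result && result != -EEXIST)
 *		return result;
 */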
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}
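/*
 * Worked example (illustrative): on a typical x86_64 configuration with 4K
 * pages and pageblock_order == 9, pageblock_nr_pages is 512, i.e. 2MB, so
 * pageblock_free() reports true only when a buddy chunk of at least that
 * size starts at the pageblock.
 */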
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of the free page */
        if (pageblock_free(page)) {
                int order;
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return 0;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check.*/
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists:
 * scan pfns from start to end and return the first LRU page found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;

        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}
#define NR_OFFLINE_AT_ONCE_PAGES (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages. And we can only deal with pages on
                 * the LRU.
                 */
                ret = isolate_lru_page(page);
                if (!ret) { /* Success */
                        put_page(page);
                        list_add_tail(&page->lru, &source);
                        move_pages--;
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                } else {
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
                               pfn);
                        dump_page(page);
#endif
                        put_page(page);
                        /* Because we don't hold the big zone->lock, we must
                           check this again here. */
                        if (page_count(page)) {
                                not_managed++;
                                ret = -EBUSY;
                                break;
                        }
                }
        }
        if (!list_empty(&source)) {
                if (not_managed) {
                        putback_lru_pages(&source);
                        goto out;
                }

                /*
                 * alloc_migrate_target() should be improved.
                 * migrate_pages() returns the number of failed pages.
                 */
                ret = migrate_pages(&source, alloc_migrate_target, 0,
                                    true, MIGRATE_SYNC);
                if (ret)
                        putback_lru_pages(&source);
        }
out:
        return ret;
}
/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                              offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;

        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                                    check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt, zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
         * which have 0...ZONE_NORMAL, so set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
         * which have 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_HIGH_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * Check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offlined is in a zone of 0...zone_last,
         * and it is the last present memory, 0...zone_last will
         * become empty after the offline, so we know we will need to
         * clear the node from node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);
        else
                arg->status_change_nid_normal = -1;

        /*
         * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
         */
        zone_last = ZONE_MOVABLE;

        /*
         * Check whether node_states[N_HIGH_MEMORY] will be changed.
         * If we try to offline the last present @nr_pages from the node,
         * we know we will need to clear the node from
         * node_states[N_HIGH_MEMORY].
         */
        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (nr_pages >= present_pages)
                arg->status_change_nid = zone_to_nid(zone);
        else
                arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_clear_state(node, N_NORMAL_MEMORY);

        if ((N_HIGH_MEMORY != N_NORMAL_MEMORY) &&
            (arg->status_change_nid >= 0))
                node_clear_state(node, N_HIGH_MEMORY);
}
static int __ref __offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        struct zone *zone;
        struct memory_notify arg;

        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /* This makes hotplug much easier -- and readable.
           We assume this for now. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        lock_memory_hotplug();

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                goto out;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_offline(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                cond_resched();
                drain_all_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have a page on the LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zones' lru pagevecs; this is asynchronous... */
        lru_add_drain_all();
        yield();
        /* drain pcp pages; this is synchronous. */
        drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /* OK, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pageblock flags and make the migrate type MOVABLE again */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;

        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        } else
                zone_pcp_update(zone);

        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0)
                kswapd_stop(node);

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        unlock_memory_hotplug();
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
               (unsigned long long) start_pfn << PAGE_SHIFT,
               ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
        unlock_memory_hotplug();
        return ret;
}
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
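/*
 * Illustrative user-space view (assuming the standard sysfs interface):
 *
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * ends up here via offline_pages() with a 120s timeout; the offline fails
 * with -EAGAIN or -EBUSY if the pages cannot be isolated and migrated away
 * in time.
 */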
int remove_memory(u64 start, u64 size)
{
        struct memory_block *mem = NULL;
        struct mem_section *section;
        unsigned long start_pfn, end_pfn;
        unsigned long pfn, section_nr;
        int ret;

        start_pfn = PFN_DOWN(start);
        end_pfn = start_pfn + PFN_DOWN(size);

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                section_nr = pfn_to_section_nr(pfn);
                if (!present_section_nr(section_nr))
                        continue;

                section = __nr_to_section(section_nr);
                /* same memblock? */
                if (mem)
                        if ((section_nr >= mem->start_section_nr) &&
                            (section_nr <= mem->end_section_nr))
                                continue;

                mem = find_memory_block_hinted(section, mem);
                if (!mem)
                        continue;

                ret = offline_memory_block(mem);
                if (ret) {
                        kobject_put(&mem->dev.kobj);
                        return ret;
                }
        }

        if (mem)
                kobject_put(&mem->dev.kobj);

        return 0;
}
#else
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);