/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */
static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);
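
/*
 * lock_memory_hotplug()/unlock_memory_hotplug() serialize hotplug
 * operations against each other and against hibernation.
 */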
void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}


/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		(unsigned long long)res->start, (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
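/*
 * Take an extra reference on a bootmem-allocated page and record the
 * section/node info and bootmem type in its private and lru fields, so
 * that put_page_bootmem() can free it correctly later.
 */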
static void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/* The reference to __meminit __free_pages_bootmem is valid,
 * so use __ref to tell modpost not to generate a warning. */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		__free_pages_bootmem(page, 0);
	}

}
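
/*
 * Mark the pages backing a section's memmap and usemap as bootmem pages,
 * so they are accounted for (and can be released) on hot-remove.
 */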
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
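
/*
 * Expand the spanned pfn range of a zone (and, below, of a node) to cover
 * a newly added section. The zone span is changed under the span seqlock;
 * the caller holds the pgdat resize lock.
 */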
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
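
/*
 * Hook a new section's pages into @zone: initialize the zone if it is
 * still empty, grow the zone and pgdat spans, and initialize the memmap.
 */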
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}
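
/*
 * Add a single memory section: allocate its memmap via sparsemem, hook it
 * into the zone, and register it with the memory sysfs layer.
 */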
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing memmap with vmemmap is not implemented yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* when initializing mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
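
/*
 * Replace or restore the page-onlining callback. Registration only
 * succeeds while the generic callback is installed, so at most one
 * caller (e.g. a balloon driver) can override it at a time.
 */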
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
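
/*
 * The three helpers below are the building blocks of generic_online_page()
 * and are exported so that custom online_page callbacks can reuse them:
 * update the max pfn, bump the RAM counters, then hand the page to the
 * buddy allocator.
 */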
void __online_page_set_limits(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	totalram_pages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}
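
/*
 * walk_system_ram_range() callback: online each page in the range via the
 * registered callback and accumulate the count of onlined pages in *arg.
 */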
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}
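
/*
 * Bring a pfn range online: notify listeners, hand the pages to the buddy
 * allocator, update zone/node accounting, rebuild the zonelists if the
 * zone was previously empty, and start kswapd on the node if needed.
 */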
int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, it is not in the zonelist and the
	 * page allocator ignores it. So the zonelist must be rebuilt after
	 * onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
			nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (need_zonelists_rebuild)
		build_all_zonelists(zone);
	else
		zone_pcp_update(zone);

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build it here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}


/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so catch it with BUG_ON(), reluctantly..
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to the
 * same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the first LRU page found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
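/*
 * Isolate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in [start_pfn, end_pfn)
 * and migrate them off the range being offlined. Returns 0 on success,
 * the number of pages that failed to migrate, or a negative errno.
 */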
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/* Because we don't have a big zone->lock, we should
			   check this again here. */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}
		/* this function returns # of failed pages */
		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
								true, true);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}
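
/*
 * Walk the System RAM portions of [start_pfn, end_pfn) and return the
 * number of isolated pages, or a negative value if any page is not
 * isolated.
 */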
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}
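
/*
 * Offline a pageblock-aligned pfn range: isolate it so no new allocations
 * land there, migrate away any pages still in use (retrying until @timeout
 * expires), then pull the isolated pages out of the free lists and fix up
 * the zone, node, and global accounting.
 */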
static int __ref offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier...and readable.
	   we assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn);

out:
	unlock_memory_hotplug();
	return ret;
}
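
/*
 * Offline the memory in [start, start + size) with a 120 s timeout.
 * Without CONFIG_MEMORY_HOTREMOVE this simply fails with -EINVAL.
 */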
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);