page_isolation.c

/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
        if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
                return;

        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
        struct zone *zone = page_zone(page);

        if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
                return;

        BUG_ON(zone->nr_pageblock_isolate <= 0);
        set_pageblock_migratetype(page, migratetype);
        zone->nr_pageblock_isolate--;
}
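
/*
 * Isolate a single pageblock so that no further allocations can be
 * satisfied from it.  The memory isolation notifier chain and
 * has_unmovable_pages() are consulted first; if the block holds pages that
 * cannot be migrated away, -EBUSY is returned and nothing is changed.
 * On success the block is marked MIGRATE_ISOLATE, its free pages are moved
 * to the isolate free list, per-cpu page lists are drained, and 0 is
 * returned.
 */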
int set_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when memory hotplug notifier runs, these
         * pages reported as "can be isolated" should be isolated(freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found))
                ret = 0;

        /*
         * "immobile" means not-on-LRU pages. If there are more immobile
         * pages than removable-by-driver pages reported by the notifier,
         * isolation fails.
         */

out:
        if (!ret) {
                set_pageblock_isolate(page);
                move_freepages_block(zone, page, MIGRATE_ISOLATE);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}
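
/*
 * Revert a pageblock that was isolated with set_migratetype_isolate():
 * its free pages are moved back to the free list for @migratetype and the
 * pageblock's migratetype is restored.  Takes zone->lock itself, so the
 * caller must not hold it.
 */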
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        move_freepages_block(zone, page, migratetype);
        restore_pageblock_isolate(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
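
/*
 * Return the first page in [pfn, pfn + nr_pages) whose page frame number
 * passes pfn_valid_within(), or NULL if no such page exists.
 */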
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:   The lower PFN of the range to be isolated.
 * @end_pfn:     The upper PFN of the range to be isolated.
 * @migratetype: The migrate type to restore on error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means that free pages in
 * the range will not be allocated: pages that are free now, and pages freed
 * in the future, stay off the allocator's free lists until the range is
 * un-isolated.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
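
/*
 * Illustrative usage (not part of the original file): a minimal sketch of
 * how a caller such as memory hotplug or CMA might drive this interface.
 * The helper do_migrate_range() is an assumption made for the example and
 * is not defined in this file.
 *
 *        if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE))
 *                return -EBUSY;
 *        ret = do_migrate_range(start_pfn, end_pfn);
 *        if (ret || test_pages_isolated(start_pfn, end_pfn))
 *                undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */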

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        pfn += 1 << page_order(page);
                else if (page_count(page) == 0 &&
                         page_private(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}
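
/*
 * test_pages_isolated() -- check that every page in [start_pfn, end_pfn)
 * is isolated: each pageblock must have migratetype MIGRATE_ISOLATE, and
 * each page must either be a free (buddy) page or a zero-refcount page
 * whose private field records MIGRATE_ISOLATE.
 * Returns 0 if the whole range is isolated, -EBUSY otherwise.
 */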
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages
         * is not necessarily aligned to pageblock_nr_pages.
         * Check the pageblock migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check all pages are free or Marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
        spin_unlock_irqrestore(&zone->lock, flags);

        return ret ? 0 : -EBUSY;
}