@@ -34,7 +34,6 @@
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
 int hugetlb_max_hstate __read_mostly;
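
The deleted global defaulted to GFP_HIGHUSER and was only ever toggled between that and GFP_HIGHUSER_MOVABLE. For reference, the two masks differ in a single flag; a sketch of the definitions as they appear in include/linux/gfp.h of this era (the exact flag set has shifted between kernel versions, so check the tree you are on):

	#define GFP_HIGHUSER		(__GFP_WAIT | __GFP_IO | __GFP_FS | \
					 __GFP_HARDWALL | __GFP_HIGHMEM)
	#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)

The rest of the patch is about deciding, per hstate and per allocation, whether __GFP_MOVABLE belongs in the mask.
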
@@ -539,6 +538,15 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 	return page;
 }
 
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+	if (hugepages_treat_as_movable || hugepage_migration_support(h))
+		return GFP_HIGHUSER_MOVABLE;
+	else
+		return GFP_HIGHUSER;
+}
+
 static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve,
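
The new helper defers to hugepage_migration_support() from include/linux/hugetlb.h, added earlier in this series. At this point in history it is roughly the following per-hstate, per-architecture gate (a sketch from the surrounding series, not part of this diff; the exact form varies by kernel version):

	/* Sketch: a hugepage size is migratable iff the architecture can
	 * migrate pmd-level hugepages and this hstate is pmd-sized. */
	static inline int hugepage_migration_support(struct hstate *h)
	{
		return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
	}

So pmd-sized hugepages on supporting architectures get GFP_HIGHUSER_MOVABLE automatically, while non-migratable (e.g. gigantic) hstates stay out of ZONE_MOVABLE unless the sysctl forces them in.
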
@@ -568,11 +576,11 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 retry_cpuset:
 	cpuset_mems_cookie = get_mems_allowed();
 	zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask, &mpol, &nodemask);
+					htlb_alloc_mask(h), &mpol, &nodemask);
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 			if (page) {
 				if (avoid_reserve)
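
Both call sites above now consult the per-hstate mask, and huge_zonelist() resolves it through gfp_zone(), so whether the walk may dequeue from ZONE_MOVABLE follows directly from __GFP_MOVABLE. A hypothetical one-liner makes the relationship explicit (illustration only, not in the patch):

	/* Hypothetical helper: highest zone the dequeue above may use. */
	static inline enum zone_type htlb_highest_zoneidx(struct hstate *h)
	{
		return gfp_zone(htlb_alloc_mask(h));	/* ZONE_MOVABLE iff movable */
	}
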
@@ -738,7 +746,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 		return NULL;
 
 	page = alloc_pages_exact_node(nid,
-		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
 	if (page) {
@@ -965,12 +973,12 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 	spin_unlock(&hugetlb_lock);
 
 	if (nid == NUMA_NO_NODE)
-		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+		page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
 				__GFP_REPEAT|__GFP_NOWARN,
 				huge_page_order(h));
 	else
 		page = alloc_pages_exact_node(nid,
-			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
 	if (page && arch_prepare_hugepage(page)) {
@@ -2117,18 +2125,6 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
 }
 #endif /* CONFIG_NUMA */
 
-int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
-			void __user *buffer,
-			size_t *length, loff_t *ppos)
-{
-	proc_dointvec(table, write, buffer, length, ppos);
-	if (hugepages_treat_as_movable)
-		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
-	else
-		htlb_alloc_mask = GFP_HIGHUSER;
-	return 0;
-}
-
 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 			void __user *buffer,
 			size_t *length, loff_t *ppos)
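
With the mask computed on demand, the dedicated sysctl handler has nothing left to cache, so hugepages_treat_as_movable can be handled by plain proc_dointvec. The companion change in kernel/sysctl.c (not shown in this excerpt) would reduce the table entry to something like:

	{
		.procname	= "hugepages_treat_as_movable",
		.data		= &hugepages_treat_as_movable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},

and the hugetlb_treat_movable_handler() declaration in include/linux/hugetlb.h goes away with it.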