@@ -1639,6 +1639,8 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
  * to the struct mempolicy for conditional unref after allocation.
  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
+ *
+ * Must be protected by get_mems_allowed()
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 				gfp_t gfp_flags, struct mempolicy **mpol,
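The new locking rule mirrors how the hugetlb fault path consumes huge_zonelist(). A minimal sketch of the expected calling pattern, modeled loosely on mm/hugetlb.c of this era (vma, address, and htlb_alloc_mask are assumed caller context, and the dequeue/retry logic is elided):

	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;

	get_mems_allowed();	/* pin mems_allowed for the whole lookup */
	zonelist = huge_zonelist(vma, address, htlb_alloc_mask,
				 &mpol, &nodemask);
	/* ... scan zonelist, filtering candidate nodes through nodemask ... */
	mpol_cond_put(mpol);	/* the conditional unref promised above */
	put_mems_allowed();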
@@ -1684,6 +1686,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
 	if (!(mask && current->mempolicy))
 		return false;
 
+	task_lock(current);
 	mempolicy = current->mempolicy;
 	switch (mempolicy->mode) {
 	case MPOL_PREFERRED:
@@ -1703,6 +1706,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
 	default:
 		BUG();
 	}
+	task_unlock(current);
 
 	return true;
 }
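task_lock(current) is what keeps current->mempolicy stable here: a concurrent cpuset rebind may rewrite the policy's nodemask, so the mask has to be copied out under the lock. A hedged sketch of the intended caller, loosely modeled on the hugetlb sysfs handlers of this era (the GFP flags and the N_HIGH_MEMORY fallback are assumptions):

	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	if (!(nodes_allowed && init_nodemask_of_mempolicy(nodes_allowed))) {
		NODEMASK_FREE(nodes_allowed);
		/* no usable policy mask: fall back to every node with memory */
		nodes_allowed = &node_states[N_HIGH_MEMORY];
	}
	/* ... resize the huge page pool across nodes_allowed ... */
	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
		NODEMASK_FREE(nodes_allowed);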
@@ -1750,13 +1754,17 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
+	struct page *page;
 
+	get_mems_allowed();
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		mpol_cond_put(pol);
-		return alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, 0, nid);
+		put_mems_allowed();
+		return page;
 	}
 	zl = policy_zonelist(gfp, pol);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
@@ -1766,12 +1774,15 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		struct page *page = __alloc_pages_nodemask(gfp, 0,
 						zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
+		put_mems_allowed();
 		return page;
 	}
 	/*
 	 * fast path: default or task policy
 	 */
-	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	put_mems_allowed();
+	return page;
 }
 
 /**
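Both alloc_page_vma() hunks apply one mechanical transformation: take get_mems_allowed() before the policy is consulted, then turn each early "return alloc...(...)" tail call into "capture page, put_mems_allowed(), return page" so the read-side section is released on every exit path. Reduced to a skeleton (the function name below is illustrative, not kernel code):

	static struct page *policy_alloc_sketch(gfp_t gfp, struct mempolicy *pol,
						struct zonelist *zl)
	{
		struct page *page;

		get_mems_allowed();	/* a concurrent cpuset update must not
					 * clear old nodes mid-allocation */
		page = __alloc_pages_nodemask(gfp, 0, zl,
					      policy_nodemask(gfp, pol));
		put_mems_allowed();	/* paired on every path out */

		return page;
	}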
@@ -1796,18 +1807,23 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = current->mempolicy;
+	struct page *page;
 
 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
 
+	get_mems_allowed();
 	/*
 	 * No reference counting needed for current->mempolicy
 	 * nor system default_policy
 	 */
 	if (pol->mode == MPOL_INTERLEAVE)
-		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
-	return __alloc_pages_nodemask(gfp, order,
+		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
+	else
+		page = __alloc_pages_nodemask(gfp, order,
 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+	put_mems_allowed();
+	return page;
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
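alloc_pages_current() backs the NUMA definition of the alloc_pages() wrapper, so ordinary callers pick up the new bracketing without any change on their side:

	/* typical caller, unchanged by this patch */
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return -ENOMEM;
	/* ... use the page ... */
	__free_pages(page, 0);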