@@ -772,33 +772,6 @@ static int hstate_next_node_to_alloc(struct hstate *h,
 	return nid;
 }
 
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
-{
-	struct page *page;
-	int start_nid;
-	int next_nid;
-	int ret = 0;
-
-	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
-		page = alloc_fresh_huge_page_node(h, next_nid);
-		if (page) {
-			ret = 1;
-			break;
-		}
-		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	} while (next_nid != start_nid);
-
-	if (ret)
-		count_vm_event(HTLB_BUDDY_PGALLOC);
-	else
-		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
-
-	return ret;
-}
-
 /*
  * helper for free_pool_huge_page() - return the previously saved
  * node ["this node"] from which to free a huge page. Advance the
@@ -817,6 +790,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 	return nid;
 }
 
+#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
+	for (nr_nodes = nodes_weight(*mask);				\
+		nr_nodes > 0 &&						\
+		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
+		nr_nodes--)
+
+#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
+	for (nr_nodes = nodes_weight(*mask);				\
+		nr_nodes > 0 &&						\
+		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
+		nr_nodes--)
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+{
+	struct page *page;
+	int nr_nodes, node;
+	int ret = 0;
+
+	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		page = alloc_fresh_huge_page_node(h, node);
+		if (page) {
+			ret = 1;
+			break;
+		}
+	}
+
+	if (ret)
+		count_vm_event(HTLB_BUDDY_PGALLOC);
+	else
+		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+	return ret;
+}
+
 /*
  * Free huge page from pool from next node to free.
  * Attempt to keep persistent huge pages more or less
@@ -826,36 +833,31 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 						 bool acct_surplus)
 {
-	int start_nid;
-	int next_nid;
+	int nr_nodes, node;
 	int ret = 0;
 
-	start_nid = hstate_next_node_to_free(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
+	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
 		/*
 		 * If we're returning unused surplus pages, only examine
 		 * nodes with surplus pages.
 		 */
-		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
-		    !list_empty(&h->hugepage_freelists[next_nid])) {
+		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
+		    !list_empty(&h->hugepage_freelists[node])) {
 			struct page *page =
-				list_entry(h->hugepage_freelists[next_nid].next,
+				list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
 			list_del(&page->lru);
 			h->free_huge_pages--;
-			h->free_huge_pages_node[next_nid]--;
+			h->free_huge_pages_node[node]--;
 			if (acct_surplus) {
 				h->surplus_huge_pages--;
-				h->surplus_huge_pages_node[next_nid]--;
+				h->surplus_huge_pages_node[node]--;
 			}
 			update_and_free_page(h, page);
 			ret = 1;
 			break;
 		}
-		next_nid = hstate_next_node_to_free(h, nodes_allowed);
-	} while (next_nid != start_nid);
+	}
 
 	return ret;
 }
@@ -1192,14 +1194,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
-	int nr_nodes = nodes_weight(node_states[N_MEMORY]);
+	int nr_nodes, node;
 
-	while (nr_nodes) {
+	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 		void *addr;
 
-		addr = __alloc_bootmem_node_nopanic(
-				NODE_DATA(hstate_next_node_to_alloc(h,
-						&node_states[N_MEMORY])),
+		addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
 				huge_page_size(h), huge_page_size(h), 0);
 
 		if (addr) {
@@ -1211,7 +1211,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
|
|
|
m = addr;
|
|
|
goto found;
|
|
|
}
|
|
|
- nr_nodes--;
|
|
|
}
|
|
|
return 0;
|
|
|
|
|
@@ -1350,48 +1349,28 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count,
 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 				int delta)
 {
-	int start_nid, next_nid;
-	int ret = 0;
+	int nr_nodes, node;
 
 	VM_BUG_ON(delta != -1 && delta != 1);
 
-	if (delta < 0)
-		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
-	else
-		start_nid = hstate_next_node_to_free(h, nodes_allowed);
-	next_nid = start_nid;
-
-	do {
-		int nid = next_nid;
-		if (delta < 0) {
-			/*
-			 * To shrink on this node, there must be a surplus page
-			 */
-			if (!h->surplus_huge_pages_node[nid]) {
-				next_nid = hstate_next_node_to_alloc(h,
-								nodes_allowed);
-				continue;
-			}
+	if (delta < 0) {
+		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+			if (h->surplus_huge_pages_node[node])
+				goto found;
 		}
-		if (delta > 0) {
-			/*
-			 * Surplus cannot exceed the total number of pages
-			 */
-			if (h->surplus_huge_pages_node[nid] >=
-						h->nr_huge_pages_node[nid]) {
-				next_nid = hstate_next_node_to_free(h,
-								nodes_allowed);
-				continue;
-			}
+	} else {
+		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+			if (h->surplus_huge_pages_node[node] <
+					h->nr_huge_pages_node[node])
+				goto found;
 		}
+	}
+	return 0;
 
-		h->surplus_huge_pages += delta;
-		h->surplus_huge_pages_node[nid] += delta;
-		ret = 1;
-		break;
-	} while (next_nid != start_nid);
-
-	return ret;
+found:
+	h->surplus_huge_pages += delta;
+	h->surplus_huge_pages_node[node] += delta;
+	return 1;
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)