@@ -513,10 +513,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were
- * free, the remainder of the region must be split into blocks.
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
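
A note on the comment above: "we can derive the state of the other" works because two buddies of order n occupy page-frame indexes that differ only in bit n. The following standalone C sketch shows that index arithmetic for splitting and coalescing; buddy_index() and the constants are illustrative stand-ins, not kernel API, mirroring the XOR-based buddy lookup in mm/page_alloc.c.

#include <stdio.h>

/* Illustrative only: buddies of order `order` differ in exactly bit
 * `order` of their (order-aligned) page frame index, so XOR finds
 * the partner block in O(1). */
static unsigned long buddy_index(unsigned long idx, unsigned int order)
{
	return idx ^ (1UL << order);
}

int main(void)
{
	/* Splitting: an order-3 block (8 pages) at index 0 is cut into
	 * order-2 halves at indexes 0 and 4.  Coalescing reverses it:
	 * when index 4 is freed at order 2, its buddy is index 0, and
	 * if that buddy is also free the two merge into order 3. */
	unsigned long idx = 0;
	unsigned int order = 3;

	printf("order-%u block at %lu splits into halves at %lu and %lu\n",
	       order, idx, idx, idx + (1UL << (order - 1)));
	printf("buddy of index 4 at order 2 is %lu\n", buddy_index(4, 2));
	return 0;
}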
@@ -1061,17 +1061,17 @@ retry_reserve:
 	return page;
 }
 
-/*
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency. Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order,
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
 	int i;
-
+
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
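
The hunk stops partway through the loop body, but the comment's point stands on its own: one spin_lock()/spin_unlock() pair on zone->lock is amortized over up to `count` calls to __rmqueue(), and the caller learns how many pages actually arrived. Below is a standalone userspace analogue of that pattern, with a pthread mutex standing in for the zone spinlock; grab_bulk, free_lock, and struct node are invented names for illustration, not kernel code.

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static struct node *free_list;          /* shared, guarded by free_lock */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Move up to `count` nodes onto the caller's private list under a
 * single lock hold; return how many were actually transferred. */
static int grab_bulk(int count, struct node **out)
{
	int i;

	pthread_mutex_lock(&free_lock);
	for (i = 0; i < count; i++) {
		struct node *n = free_list;
		if (!n)
			break;          /* source exhausted: partial fill */
		free_list = n->next;
		n->next = *out;         /* push onto caller's list */
		*out = n;
	}
	pthread_mutex_unlock(&free_lock);
	return i;
}

int main(void)
{
	struct node pool[8];
	struct node *mine = NULL;
	int i;

	for (i = 0; i < 8; i++) {       /* seed the shared free list */
		pool[i].next = free_list;
		free_list = &pool[i];
	}
	printf("transferred %d of 5 requested\n", grab_bulk(5, &mine));
	return 0;
}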
@@ -4301,7 +4301,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
 	pgdat_page_cgroup_init(pgdat);
-
+
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;