@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -351,7 +351,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED	0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retrieve and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a count N followed
+ * by N memblock list entries.  Each memblock list entry
  * contains information as laid out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
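To make the layout concrete: the property begins with one 32-bit cell holding the count N, followed by N fixed-size entries. Below is a rough, self-contained sketch of the walk, assuming two big-endian address cells as on pseries; struct drconf_cell, read_two_cells() and walk_dynamic_memory() are illustrative stand-ins for the kernel's of_drconf_cell and read_n_cells(), not kernel code:

#include <stdint.h>

/* Stand-in for the kernel's of_drconf_cell (one entry per memblock). */
struct drconf_cell {
	uint64_t base_addr;	/* physical base of the block */
	uint32_t drc_index;	/* dynamic-reconfiguration index */
	uint32_t aa_index;	/* index into the associativity arrays */
	uint32_t flags;		/* DRCONF_MEM_* flags */
};

/* Two 32-bit cells form one 64-bit value.  Cells are big-endian; this
 * sketch assumes a big-endian host (as on pseries) or cells already
 * swapped to host order. */
static uint64_t read_two_cells(const uint32_t **cellp)
{
	const uint32_t *p = *cellp;

	*cellp = p + 2;
	return ((uint64_t)p[0] << 32) | p[1];
}

/* Walk the property: a count N, then N entries laid out as in
 * of_drconf_cell (base, drc_index, reserved, aa_index, flags). */
static void walk_dynamic_memory(const uint32_t *prop,
				void (*cb)(const struct drconf_cell *))
{
	uint32_t i, n = *prop++;

	for (i = 0; i < n; i++) {
		struct drconf_cell cell;

		cell.base_addr = read_two_cells(&prop);
		cell.drc_index = *prop++;
		prop++;			/* reserved cell, skipped */
		cell.aa_index = *prop++;
		cell.flags = *prop++;
		cb(&cell);
	}
}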
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
 	const u32 *prop;
 	u32 len;
 
 	prop = of_get_property(memory, "ibm,lmb-size", &len);
 	if (!prop || len < sizeof(unsigned int))
 		return 0;
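Note that "ibm,lmb-size" is the property name the firmware puts in the device tree, so only the kernel-side identifiers are renamed above; the string passed to of_get_property() must keep the spelling the platform defines, or the lookup fails and drconf memory parsing breaks. The property's value spans n_mem_size_cells 32-bit cells, which the kernel's read_n_cells() folds into one number, roughly as in this sketch (read_n_cells_sketch() is an illustrative stand-in):

#include <stdint.h>

/* Sketch of read_n_cells(): fold n big-endian 32-bit cells into a
 * single value, advancing the cursor past them. */
static uint64_t read_n_cells_sketch(int n, const uint32_t **cellp)
{
	uint64_t result = 0;

	while (n--) {
		result = (result << 32) | **cellp;
		(*cellp)++;
	}
	return result;
}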
@@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 					      unsigned long size)
 {
 	/*
-	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
+	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
 	 * having memory holes below the limit.  Also, in the case of
 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */
 
-	if (start + size <= lmb_end_of_DRAM())
+	if (start + size <= memblock_end_of_DRAM())
 		return size;
 
-	if (start >= lmb_end_of_DRAM())
+	if (start >= memblock_end_of_DRAM())
 		return 0;
 
-	return lmb_end_of_DRAM() - start;
+	return memblock_end_of_DRAM() - start;
 }
 
 /*
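The three branches above reduce to clamping the half-open range [start, start + size) against memblock_end_of_DRAM(). Restated as a stand-alone helper, with clamp_range() and its end parameter purely illustrative:

/* Clamp the range [start, start + size) against the address 'end'
 * and return how much of it survives. */
static unsigned long clamp_range(unsigned long start, unsigned long size,
				 unsigned long end)
{
	if (start + size <= end)	/* entirely below the limit */
		return size;
	if (start >= end)		/* entirely above the limit */
		return 0;
	return end - start;		/* straddles it: truncate */
}

For example, with end at 4GB, a 1GB range starting at 3.5GB comes back truncated to 512MB.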
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
 	/*
-	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * For each memblock in ibm,dynamic-memory a corresponding
 	 * entry in linux,drconf-usable-memory property contains
 	 * a counter followed by that many (base, size) duples.
 	 * read the counter from linux,drconf-usable-memory
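For each block in ibm,dynamic-memory, the matching linux,drconf-usable-memory entry is a counter followed by that many (base, size) duples. A sketch of consuming one entry, assuming two-cell (64-bit) addresses and sizes as on 64-bit pseries; read_pair_cell() and read_usable_entry() are illustrative names:

#include <stdint.h>

/* Read one 64-bit value made of two big-endian 32-bit cells. */
static uint64_t read_pair_cell(const uint32_t **p)
{
	uint64_t v = ((uint64_t)(*p)[0] << 32) | (*p)[1];

	*p += 2;
	return v;
}

/* Consume one per-block entry: the counter, then that many
 * (base, size) duples describing the usable ranges (the property
 * is only present on kexec/kdump kernels). */
static void read_usable_entry(const uint32_t **usm,
			      void (*cb)(uint64_t base, uint64_t size))
{
	uint64_t r, ranges = read_pair_cell(usm);

	for (r = 0; r < ranges; r++) {
		uint64_t base = read_pair_cell(usm);
		uint64_t size = read_pair_cell(usm);

		cb(base, size);
	}
}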
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
 	const u32 *dm, *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
-	unsigned long lmb_size, base, size, sz;
+	unsigned long memblock_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;
 
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (!n)
 		return;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 			continue;
 
 		base = drmem.base_addr;
-		size = lmb_size;
+		size = memblock_size;
 		ranges = 1;
 
 		if (is_kexec_kdump) {
@@ -731,7 +731,7 @@ new_range:
 	}
 
 	/*
-	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
 	 * property in the ibm,dynamic-reconfiguration-memory node.
 	 */
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -743,8 +743,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int i, nid = 0;
 
@@ -753,9 +753,9 @@ static void __init setup_nonnuma(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < lmb.memory.cnt; ++i) {
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+	for (i = 0; i < memblock.memory.cnt; ++i) {
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
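The loop above is the open-coded iteration over memblock.memory used in this era; each region contributes one page-frame range to node 0 (or to a synthetic node under fake NUMA). memblock_size_pages() is simply the region size shifted down by PAGE_SHIFT, so the range works out as in this sketch (the names and the 4KB page assumption are illustrative):

/* Convert one memory region to its page-frame range; with 4KB pages
 * the shift is 12. */
#define PAGE_SHIFT_SKETCH 12

static void region_to_pfn_range(unsigned long base, unsigned long size,
				unsigned long *start_pfn,
				unsigned long *end_pfn)
{
	*start_pfn = base >> PAGE_SHIFT_SKETCH;
	*end_pfn = *start_pfn + (size >> PAGE_SHIFT_SKETCH);
}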
@@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM();
+		for (i = 0; i < memblock_end_of_DRAM();
 		     i += (1 << SECTION_SIZE_BITS)) {
 			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
@@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, using the memblock or bootmem allocator as
  * required.  nid is the preferred node and end is the physical address of
  * the highest address in the node.
 *
@@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 	int new_nid;
 	unsigned long ret_paddr;
 
-	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret_paddr)
-		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
 	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the LMB allocator to the
+	 * and hand over control from the MEMBLOCK allocator to the
 	 * bootmem allocator.  If this function is called for
 	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the LMB allocator.
+	 * bootmem allocator instead of the MEMBLOCK allocator.
 	 *
 	 * So, check the nid from which this allocation came
 	 * and double check to see if we need to use bootmem
-	 * instead of the LMB.  We don't free the LMB memory
+	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
 	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
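The comment above rests on a simple invariant: nodes are brought up in ascending order, so if the early allocation landed on a lower-numbered node, that node has already switched to bootmem and the allocation must be redone there. Isolated as a sketch, with needs_bootmem_retry() an illustrative name:

/* 'nid' is the node currently being set up; 'alloc_nid' is where the
 * early MEMBLOCK allocation actually landed.  Nodes below nid have
 * already handed control to bootmem, so memory on them must be
 * (re)allocated from bootmem instead. */
static int needs_bootmem_retry(int nid, int alloc_nid)
{
	return alloc_nid < nid;
}

The memblock-side allocation is deliberately left in place in that case; as the comment says, freeing it would be useless since the early allocator is about to be retired.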
@@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid)
 	struct pglist_data *node = NODE_DATA(nid);
 	int i;
 
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long physbase = memblock.reserved.region[i].base;
+		unsigned long size = memblock.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
@@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid)
 			node->node_spanned_pages;
 
 		/*
-		 * Check to make sure that this lmb.reserved area is
+		 * Check to make sure that this memblock.reserved area is
 		 * within the bounds of the node that we care about.
 		 * Checking the nid of the start and end points is not
 		 * sufficient because the reserved area could span the
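The point of this comment is that node membership of the endpoints is not an interval-intersection test: a reserved range can start before the node and end after it, overlapping the node while neither endpoint lies in it. The usual half-open overlap check, sketched with illustrative names:

/* True iff [start_pfn, end_pfn) intersects [node_start, node_end).
 * Covers ranges that enter from either side or span the whole node,
 * which endpoint-only nid checks would miss. */
static int pfn_range_overlaps_node(unsigned long start_pfn,
				   unsigned long end_pfn,
				   unsigned long node_start,
				   unsigned long node_end)
{
	return start_pfn < node_end && end_pfn > node_start;
}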
@@ -961,7 +961,7 @@ void __init do_init_bootmem(void)
 	int nid;
 
 	min_low_pfn = 0;
-	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;
 
 	if (parse_numa_properties())
@@ -1038,7 +1038,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
 	const u32 *dm;
 	unsigned int drconf_cell_cnt, rc;
-	unsigned long lmb_size;
+	unsigned long memblock_size;
 	struct assoc_arrays aa;
 	int nid = -1;
 
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 	if (!drconf_cell_cnt)
 		return -1;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return -1;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 			continue;
 
 		if ((scn_addr < drmem.base_addr)
-		    || (scn_addr >= (drmem.base_addr + lmb_size)))
+		    || (scn_addr >= (drmem.base_addr + memblock_size)))
 			continue;
 
 		nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
  */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
|
|
|
|
|
|
/*
|
|
/*
|
|
* Find the node associated with a hot added memory section. Section
|
|
* Find the node associated with a hot added memory section. Section
|
|
- * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
|
|
|
|
- * sections are fully contained within a single LMB.
|
|
|
|
|
|
+ * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that
|
|
|
|
+ * sections are fully contained within a single MEMBLOCK.
|
|
*/
|
|
*/
|
|
int hot_add_scn_to_nid(unsigned long scn_addr)
|
|
int hot_add_scn_to_nid(unsigned long scn_addr)
|
|
{
|
|
{
|