@@ -100,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet. Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
 	int cpu, n = 0;
@@ -115,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
 	unsigned long pernodesize = 0, cpus;
@@ -792,6 +792,18 @@ void __init paging_init(void)
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
 
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+	unsigned long size = compute_pernodesize(nid);
+
+	return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+	kfree(pgdat);
+}
+
 void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
 {
 	pgdat_list[update_node] = update_pgdat;