@@ -76,15 +76,14 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
 	if (likely(slab_is_available()))
-		*mask = kmalloc(cpumask_size(), flags);
+		*mask = kmalloc_node(cpumask_size(), flags, node);
 	else {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 		printk(KERN_ERR
 			"=> alloc_cpumask_var: kmalloc not available!\n");
-		dump_stack();
 #endif
 		*mask = NULL;
 	}
@@ -96,6 +95,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 #endif
 	return *mask != NULL;
 }
+EXPORT_SYMBOL(alloc_cpumask_var_node);
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+}
 EXPORT_SYMBOL(alloc_cpumask_var);
 
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
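
Usage note (not part of the patch): a minimal sketch of how a caller might use the new NUMA-aware allocator. alloc_cpumask_var_node(), cpumask_clear(), cpumask_set_cpu() and free_cpumask_var() are the real cpumask API; the example function, its parameters and the GFP flags are illustrative assumptions.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical caller: allocate a scratch cpumask on (or near) a given
 * NUMA node, mark one CPU in it, then free it. */
static int example_use_node_local_mask(int cpu, int node)
{
	cpumask_var_t tmp;

	/* With CONFIG_CPUMASK_OFFSTACK this kmallocs the mask near 'node';
	 * without it, cpumask_var_t lives on the stack and the call is a
	 * no-op that always succeeds. */
	if (!alloc_cpumask_var_node(&tmp, GFP_KERNEL, node))
		return -ENOMEM;

	cpumask_clear(tmp);
	cpumask_set_cpu(cpu, tmp);

	/* ... hand tmp to whatever needs a temporary mask ... */

	free_cpumask_var(tmp);
	return 0;
}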