@@ -90,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock(void)
+{
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock(void)
+{
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
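
For context, a minimal caller sketch of the lock/unlock pair added above, assuming
the declarations are exported via <linux/cpu.h> as in this era of the tree;
my_hotplug_online_cpu() is an invented name used purely for illustration:

#include <linux/cpu.h>

/*
 * Hypothetical hotplug-driver path: hold x86_cpu_hotplug_driver_mutex
 * (via the accessors above) across the bring-up so that concurrent
 * on-/offlining cannot race on the shared boot trampoline.
 */
static int my_hotplug_online_cpu(unsigned int cpu)
{
	int ret;

	cpu_hotplug_driver_lock();	/* grabs x86_cpu_hotplug_driver_mutex */
	ret = cpu_up(cpu);		/* trampoline_base is used under the lock */
	cpu_hotplug_driver_unlock();

	return ret;
}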