@@ -33,17 +33,13 @@ static struct {
  * an ongoing cpu hotplug operation.
  */
 	int refcount;
-	wait_queue_head_t writer_queue;
 } cpu_hotplug;
 
-#define writer_exists() (cpu_hotplug.active_writer != NULL)
-
 void __init cpu_hotplug_init(void)
 {
 	cpu_hotplug.active_writer = NULL;
 	mutex_init(&cpu_hotplug.lock);
 	cpu_hotplug.refcount = 0;
-	init_waitqueue_head(&cpu_hotplug.writer_queue);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,11 +61,8 @@ void put_online_cpus(void)
 	if (cpu_hotplug.active_writer == current)
 		return;
 	mutex_lock(&cpu_hotplug.lock);
-	cpu_hotplug.refcount--;
-
-	if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
-		wake_up(&cpu_hotplug.writer_queue);
-
+	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+		wake_up_process(cpu_hotplug.active_writer);
 	mutex_unlock(&cpu_hotplug.lock);
 
 }
@@ -98,8 +91,8 @@ void cpu_maps_update_done(void)
  * Note that during a cpu-hotplug operation, the new readers, if any,
  * will be blocked by the cpu_hotplug.lock
  *
- * Since cpu_maps_update_begin is always called after invoking
- * cpu_maps_update_begin, we can be sure that only one writer is active.
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
  *
  * Note that theoretically, there is a possibility of a livelock:
  * - Refcount goes to zero, last reader wakes up the sleeping
@@ -115,19 +108,16 @@ void cpu_maps_update_done(void)
  */
 static void cpu_hotplug_begin(void)
 {
-	DECLARE_WAITQUEUE(wait, current);
-
-	mutex_lock(&cpu_hotplug.lock);
-
 	cpu_hotplug.active_writer = current;
-	add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
-	while (cpu_hotplug.refcount) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+
+	for (;;) {
+		mutex_lock(&cpu_hotplug.lock);
+		if (likely(!cpu_hotplug.refcount))
+			break;
+		__set_current_state(TASK_UNINTERRUPTIBLE);
 		mutex_unlock(&cpu_hotplug.lock);
 		schedule();
-		mutex_lock(&cpu_hotplug.lock);
 	}
-	remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
 }
 
 static void cpu_hotplug_done(void)
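
For reference, here is how the two touched functions read with the patch applied. This is reconstructed from the hunks above (context plus added lines), so treat it as a sketch of the result rather than the exact file contents:

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	/* Last reader out wakes a writer sleeping in cpu_hotplug_begin(). */
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);

}

static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;	/* no readers left; exit with the lock held */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();	/* woken by wake_up_process() in put_online_cpus() */
	}
}

Instead of sleeping on a dedicated writer_queue, the writer now publishes itself in cpu_hotplug.active_writer and the last reader wakes it directly with wake_up_process(). As in the old version (which re-took the mutex at the bottom of its while loop), the writer still leaves the loop with cpu_hotplug.lock held.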