@@ -5196,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+	__wake_up_common(q, mode, 1, 0, key);
+}
+
 /**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
  *
  * The sync wakeup differs that the waker knows that it will schedule
  * away soon, so while the target thread will be woken up, it will not
@@ -5209,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
 {
 	unsigned long flags;
 	int sync = 1;
@@ -5222,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 		sync = 0;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, sync, NULL);
+	__wake_up_common(q, mode, nr_exclusive, sync, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
 /**