@@ -254,16 +254,165 @@ void __sched mutex_unlock(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_unlock);
 
+/**
+ * ww_mutex_unlock - release the w/w mutex
+ * @lock: the mutex to be released
+ *
+ * Unlock a mutex that has been locked by this task previously with any of the
+ * ww_mutex_lock* functions (with or without an acquire context). It is
+ * forbidden to release the locks after releasing the acquire context.
+ *
+ * This function must not be used in interrupt context. Unlocking
+ * of an unlocked mutex is not allowed.
+ */
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+	/*
+	 * The unlocking fastpath is the 0->1 transition from 'locked'
+	 * into 'unlocked' state:
+	 */
+	if (lock->ctx) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+		if (lock->ctx->acquired > 0)
+			lock->ctx->acquired--;
+		lock->ctx = NULL;
+	}
+
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled, we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(&lock->base);
+#endif
+	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+
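As a usage sketch (not part of this diff): a w/w mutex taken under an acquire context is released with ww_mutex_unlock() before the context itself is ended. The ww_acquire_init/done/fini and ww_mutex_lock names below are the wrappers this series introduces elsewhere; obj and my_class are made up for the example, and error handling is omitted here (see the backoff sketch further down).

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_class);	/* my_class: some struct ww_class */
	ret = ww_mutex_lock(&obj->lock, &ctx);	/* may return -EDEADLK / -EALREADY */
	ww_acquire_done(&ctx);			/* no further locks will be taken */

	/* ... use obj ... */

	ww_mutex_unlock(&obj->lock);		/* must happen before ww_acquire_fini() */
	ww_acquire_fini(&ctx);
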
+static inline int __sched
+__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+
+	return 0;
+}
+
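The -EDEADLK test above orders acquire contexts by their (wrapping) stamp: the context with the younger stamp backs off when it finds the lock held by an older one, with the context pointer as a tie breaker. A standalone illustration of the wraparound-safe part of that comparison (ordinary userspace C, not kernel code):

	#include <limits.h>
	#include <stdbool.h>

	/*
	 * 'a' was handed out after 'b' iff the unsigned difference fits in
	 * the positive half of the range; this stays true when the global
	 * stamp counter wraps around.
	 */
	static bool stamp_is_younger(unsigned long a, unsigned long b)
	{
		return a != b && a - b <= LONG_MAX;
	}

	/* stamp_is_younger(5, 2)         -> true
	 * stamp_is_younger(2, 5)         -> false
	 * stamp_is_younger(1, ULONG_MAX) -> true (counter just wrapped)
	 */
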
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+						   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}
+
+	/*
+	 * Naughty, using a different class will lead to undefined behavior!
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+
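The contending_lock checks above encode the caller-side contract after -EDEADLK: drop every lock held in this context, then sleep on the lock that was lost using the _slow variant before retrying. A hedged sketch for two objects (ww_mutex_lock, ww_mutex_lock_slow and ww_acquire_done are the wrappers this series adds elsewhere; struct obj is made up):

	static void lock_both(struct obj *a, struct obj *b,
			      struct ww_acquire_ctx *ctx)
	{
		struct obj *contended = NULL;
		int ret;

	retry:
		if (contended)	/* nothing is held here: wait for the lock we lost on */
			ww_mutex_lock_slow(&contended->lock, ctx);

		if (contended != a) {
			ret = ww_mutex_lock(&a->lock, ctx);
			if (ret == -EDEADLK) {
				if (contended == b)
					ww_mutex_unlock(&b->lock);
				contended = a;
				goto retry;
			}
		}
		if (contended != b) {
			ret = ww_mutex_lock(&b->lock, ctx);
			if (ret == -EDEADLK) {
				ww_mutex_unlock(&a->lock);
				contended = b;
				goto retry;
			}
		}
		ww_acquire_done(ctx);	/* both held; unlock + ww_acquire_fini() when done */
	}
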
+/*
+ * After acquiring the lock with the fastpath, or when we lost out in the
+ * contested slowpath, set ctx and wake up any waiters so they can recheck.
+ *
+ * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
+ * as the fastpath and opportunistic spinning are disabled in that case.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock,
+			      struct ww_acquire_ctx *ctx)
+{
+	unsigned long flags;
+	struct mutex_waiter *cur;
+
+	ww_mutex_lock_acquired(lock, ctx);
+
+	lock->ctx = ctx;
+
+	/*
+	 * The lock->ctx update should be visible on all cores before
+	 * the atomic read is done, otherwise contended waiters might be
+	 * missed. The contended waiters will either see ww_ctx == NULL
+	 * and keep spinning, or they will acquire wait_lock, add themselves
+	 * to the waiter list and sleep.
+	 */
+	smp_mb(); /* ^^^ */
+
+	/*
+	 * Check if lock is contended, if not there is nobody to wake up.
+	 */
+	if (likely(atomic_read(&lock->base.count) == 0))
+		return;
+
+	/*
+	 * Uh oh, we raced in fastpath, wake up everyone in this case,
+	 * so they can see the new lock->ctx.
+	 */
+	spin_lock_mutex(&lock->base.wait_lock, flags);
+	list_for_each_entry(cur, &lock->base.wait_list, list) {
+		debug_mutex_wake_waiter(&lock->base, cur);
+		wake_up_process(cur->task);
+	}
+	spin_unlock_mutex(&lock->base.wait_lock, flags);
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
-static inline int __sched
+static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
-		    struct lockdep_map *nest_lock, unsigned long ip)
+		    struct lockdep_map *nest_lock, unsigned long ip,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
+	int ret;
 
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
@@ -298,6 +447,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node node;
 
+		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+			struct ww_mutex *ww;
+
+			ww = container_of(lock, struct ww_mutex, base);
+			/*
+			 * If ww->ctx is set the contents are undefined; only
+			 * by acquiring wait_lock is there a guarantee that
+			 * they are not invalid when read.
+			 *
+			 * As such, when deadlock detection needs to be
+			 * performed the optimistic spinning cannot be done.
+			 */
+			if (ACCESS_ONCE(ww->ctx))
+				break;
+		}
+
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
@@ -312,6 +477,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
+			if (!__builtin_constant_p(ww_ctx == NULL)) {
+				struct ww_mutex *ww;
+				ww = container_of(lock, struct ww_mutex, base);
+
+				ww_mutex_set_context_fastpath(ww, ww_ctx);
+			}
+
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
 			preempt_enable();
@@ -371,15 +543,16 @@ slowpath:
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
 		if (unlikely(signal_pending_state(state, task))) {
-			mutex_remove_waiter(lock, &waiter,
-					    task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, ip);
-			spin_unlock_mutex(&lock->wait_lock, flags);
+			ret = -EINTR;
+			goto err;
+		}
 
-			debug_mutex_free_waiter(&waiter);
-			preempt_enable();
-			return -EINTR;
+		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+			if (ret)
+				goto err;
 		}
+
 		__set_task_state(task, state);
 
 		/* didn't get the lock, go to sleep: */
@@ -394,6 +567,30 @@ done:
 	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
+	if (!__builtin_constant_p(ww_ctx == NULL)) {
+		struct ww_mutex *ww = container_of(lock,
+						   struct ww_mutex,
+						   base);
+		struct mutex_waiter *cur;
+
+		/*
+		 * This branch gets optimized out for the common case,
+		 * and is only important for ww_mutex_lock.
+		 */
+
+		ww_mutex_lock_acquired(ww, ww_ctx);
+		ww->ctx = ww_ctx;
+
+		/*
+		 * Give any possible sleeping processes the chance to wake up,
+		 * so they can recheck if they have to back off.
+		 */
+		list_for_each_entry(cur, &lock->wait_list, list) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
+	}
+
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -404,6 +601,14 @@ done:
 	preempt_enable();
 
 	return 0;
+
+err:
+	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	spin_unlock_mutex(&lock->wait_lock, flags);
+	debug_mutex_free_waiter(&waiter);
+	mutex_release(&lock->dep_map, 1, ip);
+	preempt_enable();
+	return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +616,8 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+			    subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +626,8 @@ void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+			    0, nest, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +636,8 @@ int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE,
+				   subclass, NULL, _RET_IP_, NULL);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -438,10 +646,68 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_);
+				   subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
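CONFIG_DEBUG_WW_MUTEX_SLOWPATH makes otherwise-successful acquisitions fail with -EDEADLK at a growing interval, so callers' backoff paths get exercised without real contention. The interval grows by roughly 3.5x per injection (tmp*2 + tmp + tmp/2). A standalone illustration of the resulting sequence, assuming the interval starts at 1:

	#include <stdio.h>

	int main(void)
	{
		unsigned tmp = 1;	/* assumed initial deadlock_inject_interval */

		for (int i = 0; i < 6; i++) {
			printf("%u ", tmp);
			tmp = tmp * 2 + tmp + tmp / 2;	/* same growth rule as above */
		}
		printf("\n");		/* prints: 1 3 10 35 122 427 */
		return 0;
	}
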
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
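These lockdep-enabled entry points are what the inline ww_mutex_lock()/ww_mutex_lock_interruptible() wrappers in the new header are expected to call when an acquire context is passed; with a NULL context a w/w mutex degenerates to a plain mutex on its base lock. A hedged sketch of such a wrapper (the header added elsewhere in this series may differ in detail):

	static inline int ww_mutex_lock(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
	{
		if (ctx)
			return __ww_mutex_lock(lock, ctx);	/* full w/w semantics */

		mutex_lock(&lock->base);	/* no context: behaves like a plain mutex */
		return 0;
	}
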
 #endif
 
 /*
@@ -494,10 +760,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);
 
 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +781,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_interruptible_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_interruptible_slowpath(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +796,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_killable_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_killable_slowpath(lock);
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
@@ -544,24 +810,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
+			    NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
+				   NULL, _RET_IP_, NULL);
+}
 
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
+{
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, ctx);
+}
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
+				       struct ww_acquire_ctx *ctx)
+{
+	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, ctx);
 }
+
 #endif
 
 /*
@@ -617,6 +898,45 @@ int __sched mutex_trylock(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_trylock);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+	if (likely(!ret)) {
+		ww_mutex_set_context_fastpath(lock, ctx);
+		mutex_set_owner(&lock->base);
+	} else
+		ret = __ww_mutex_lock_slowpath(lock, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+	if (likely(!ret)) {
+		ww_mutex_set_context_fastpath(lock, ctx);
+		mutex_set_owner(&lock->base);
+	} else
+		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
+
+#endif
+
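Callers of the interruptible variants have to distinguish the two failure modes: -EINTR from the new err path in __mutex_lock_common() (a signal, give up) and -EDEADLK (back off and retry). A hedged single-lock sketch, assuming the ww_mutex_lock_interruptible() wrapper name:

	ret = ww_mutex_lock_interruptible(&obj->lock, &ctx);
	if (ret == -EINTR) {		/* interrupted by a signal: nothing is held */
		ww_acquire_fini(&ctx);
		return ret;
	}
	if (ret == -EDEADLK) {
		/* back off: drop the other locks held in ctx, then retry
		 * with the _slow variant as in the earlier sketch */
	}
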
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
  * @cnt: the atomic which we are to dec