@@ -426,14 +426,10 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
 static inline int queue_flag_test_and_set(unsigned int flag,
 					   struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
 
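With this change, queue_flag_set() and the other helpers above assert via lockdep that q->queue_lock is held instead of sampling spin_is_locked(). A minimal sketch of a conforming caller follows; the function name example_stop_queue and the choice of QUEUE_FLAG_STOPPED are illustrative only and not part of this patch:

/*
 * Illustrative only: callers of queue_flag_set()/queue_flag_clear()
 * must hold q->queue_lock so that queue_lockdep_assert_held() is
 * satisfied when lockdep is enabled.
 */
static void example_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}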