@@ -480,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
 	spin_lock_irq(lock);
 
 	/*
@@ -498,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
@@ -512,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
+	spin_lock_irq(lock);
+	if (q->queue_lock != &q->__queue_lock)
+		q->queue_lock = &q->__queue_lock;
+	spin_unlock_irq(lock);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
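
The hunks above delay switching q->queue_lock back to the embedded q->__queue_lock. Previously the pointer was re-pointed while teardown was still in progress under the (possibly driver-supplied) lock, so a thread that acquired the external lock through q->queue_lock could later release through the same pointer and unlock the internal lock instead, unbalancing both. Performing the switch only after blk_sync_queue(), once no users remain, avoids that.

A minimal userspace sketch of the same pattern, using pthreads; this is illustrative only, not kernel code. The struct and field names loosely mirror request_queue (internal_lock stands in for __queue_lock, which is a reserved identifier in portable C), and the drain/sync step is stubbed out:

	#include <pthread.h>
	#include <stdio.h>

	struct queue {
		pthread_mutex_t *queue_lock;	/* may point at a caller-supplied lock... */
		pthread_mutex_t internal_lock;	/* ...or at this embedded fallback */
	};

	static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;

	static void cleanup_queue(struct queue *q)
	{
		pthread_mutex_t *lock = q->queue_lock;

		/* mark dead, drain and sync under *lock* here (elided) */

		/*
		 * Re-point queue_lock at the embedded lock only once no user
		 * can still be holding the old one; switching earlier would
		 * let a thread lock driver_lock yet unlock internal_lock
		 * through the re-pointed queue_lock.
		 */
		pthread_mutex_lock(lock);
		if (q->queue_lock != &q->internal_lock)
			q->queue_lock = &q->internal_lock;
		pthread_mutex_unlock(lock);
	}

	int main(void)
	{
		static struct queue q = {
			.queue_lock = &driver_lock,
			.internal_lock = PTHREAD_MUTEX_INITIALIZER,
		};

		cleanup_queue(&q);
		printf("queue_lock now internal: %s\n",
		       q.queue_lock == &q.internal_lock ? "yes" : "no");
		return 0;
	}

Built with cc -pthread, this prints "queue_lock now internal: yes"; the point is the ordering, i.e. the pointer switch happens strictly after the queue is quiesced, exactly as the third hunk arranges in blk_cleanup_queue().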