@@ -1057,18 +1057,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	bqt->tag_index[tag] = NULL;
 
-	/*
-	 * We use test_and_clear_bit's memory ordering properties here.
-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-	 * a barrer before clearing the bit (precisely: release semantics).
-	 * Could use clear_bit_unlock when it is merged.
-	 */
-	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 		       __FUNCTION__, tag);
 		return;
 	}
-
+	/*
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * unlock memory barrier semantics.
+	 */
+	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
 
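For readers new to the bitops lock API: each bit in tag_map acts as a tiny per-slot lock protecting tag_index[tag], so setting the bit needs acquire semantics and clearing it needs release semantics. Below is a minimal userspace sketch of that protocol using C11 atomics; it only models the ordering the patch relies on (tag_map, MAX_TAGS, bit_trylock and bit_unlock are illustrative names here, not the kernel implementation).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_TAGS 32

static atomic_uint tag_map;		/* one "lock" bit per tag */
static void *tag_index[MAX_TAGS];	/* each slot guarded by its bit */

/* Analogue of test_and_set_bit_lock(): returns the old bit value;
 * acquire ordering makes subsequent accesses to tag_index[tag] safe. */
static bool bit_trylock(unsigned int tag)
{
	unsigned int mask = 1u << tag;

	return atomic_fetch_or_explicit(&tag_map, mask,
					memory_order_acquire) & mask;
}

/* Analogue of clear_bit_unlock(): release ordering guarantees the
 * preceding tag_index[tag] store is visible before the bit reads
 * as free on another CPU. */
static void bit_unlock(unsigned int tag)
{
	atomic_fetch_and_explicit(&tag_map, ~(1u << tag),
				  memory_order_release);
}

int main(void)
{
	unsigned int tag = 3;
	static int payload;

	if (!bit_trylock(tag)) {	/* bit was clear: slot is ours */
		tag_index[tag] = &payload;
		/* ... use the tag ... */
		tag_index[tag] = NULL;
		bit_unlock(tag);	/* publish stores, then free the tag */
	}
	printf("tag %u released, map=%#x\n", tag, atomic_load(&tag_map));
	return 0;
}

Note that bit_trylock() mirrors the kernel convention: a nonzero return means the bit was already set, i.e. someone else holds the slot.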
@@ -1114,10 +1112,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		if (tag >= bqt->max_depth)
 			return 1;
 
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
-	 * We rely on test_and_set_bit providing lock memory ordering semantics
-	 * (could use test_and_set_bit_lock when it is merged).
+	 * We need lock ordering semantics given by test_and_set_bit_lock.
+	 * See blk_queue_end_tag for details.
 	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
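The allocation loop in blk_queue_start_tag follows the same protocol from the lock side: find a candidate free bit, then claim it with the acquire-ordered test-and-set, retrying if another CPU won the race in between. Continuing the hypothetical userspace sketch above (this reuses tag_map, MAX_TAGS and bit_trylock from there), the shape of that loop is roughly:

/* Hypothetical allocator loop, modelled on the hunk above. */
static int alloc_tag(void)
{
	unsigned int map, tag;

	do {
		map = atomic_load_explicit(&tag_map, memory_order_relaxed);
		tag = 0;
		while (tag < MAX_TAGS && (map & (1u << tag)))
			tag++;		/* find-first-zero by hand */
		if (tag >= MAX_TAGS)
			return -1;	/* analogue of the max_depth check */
	} while (bit_trylock(tag));	/* lost the race: pick again */

	return (int)tag;
}

On success the caller owns tag_index[tag] until it calls bit_unlock(tag); the acquire/release pair is what makes handing the slot between CPUs safe without any heavier locking.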