@@ -388,20 +388,19 @@ struct request_queue
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
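
Note: the second hunk only adds the prototype for blk_run_queue_async(); its body is not shown in this excerpt, but it is expected to hand the queue run off to the kblockd workqueue rather than calling q->request_fn from the caller's context. A minimal caller sketch follows; example_kick_queue() is a hypothetical helper invented for illustration and is not part of the patch, while blk_queue_stopped() is the existing flag-test macro from blkdev.h.

/*
 * Sketch only: example_kick_queue() is hypothetical and not part of this
 * patch.  It illustrates the intended use of the new prototype -- asking
 * for an asynchronous queue run instead of re-entering the request_fn
 * from the current context.
 */
#include <linux/blkdev.h>

static void example_kick_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))	/* existing QUEUE_FLAG_STOPPED test */
		return;

	blk_run_queue_async(q);		/* deferred run, no direct recursion */
}

Deferring the run this way removes the need for a re-entrancy guard, which is presumably why QUEUE_FLAG_REENTER is dropped and the remaining flag bits are renumbered in the first hunk.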