@@ -89,6 +89,7 @@ enum {
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
@@ -111,6 +112,7 @@ enum rq_flag_bits {
 };
 
 #define REQ_RW		(1 << __REQ_RW)
+#define REQ_DISCARD	(1 << __REQ_DISCARD)
 #define REQ_FAILFAST	(1 << __REQ_FAILFAST)
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
@@ -252,6 +254,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
+typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -307,6 +310,7 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
+	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
@@ -546,6 +550,7 @@ enum {
 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
+#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
@@ -796,6 +801,7 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
@@ -837,6 +843,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
+extern int blkdev_issue_discard(struct block_device *, sector_t sector,
+				unsigned nr_sects);
+
+static inline int sb_issue_discard(struct super_block *sb,
+				   sector_t block, unsigned nr_blocks)
+{
+	block <<= (sb->s_blocksize_bits - 9);
+	nr_blocks <<= (sb->s_blocksize_bits - 9);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks);
+}
 
 /*
  * command filter functions