@@ -17,6 +17,7 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
+#include <linux/blkdev.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX		UINT_MAX
@@ -93,6 +94,8 @@ struct blkcg_gq {
 	struct list_head		q_node;
 	struct hlist_node		blkcg_node;
 	struct blkcg			*blkcg;
+	/* request allocation list for this blkcg-q pair */
+	struct request_list		rl;
 	/* reference count */
 	int				refcnt;
 
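The new rl member gives each blkcg-queue pair its own request allocation list; the root cgroup keeps using q->root_rl (see blk_get_rl() below). The helpers added in the next hunk rely on rl->blkg pointing back at the owning blkg so blk_put_rl() can drop a reference. A minimal sketch of that wiring, illustrative only and not part of the patch; example_init_blkg_rl() is a made-up name and the real setup belongs in the blkg allocation path (not shown here):

	/* Illustrative sketch: wire the embedded request_list back to its blkg. */
	static void example_init_blkg_rl(struct blkcg_gq *blkg)
	{
		/* back-pointer consulted by blk_put_rl() */
		blkg->rl.blkg = blkg;
	}
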
@@ -250,6 +253,95 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 		__blkg_release(blkg);
 }
 
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+					      struct bio *bio)
+{
+	struct blkcg *blkcg;
+	struct blkcg_gq *blkg;
+
+	rcu_read_lock();
+
+	blkcg = bio_blkcg(bio);
+
+	/* bypass blkg lookup and use @q->root_rl directly for root */
+	if (blkcg == &blkcg_root)
+		goto root_rl;
+
+	/*
+	 * Try to use blkg->rl. blkg lookup may fail under memory pressure
+	 * or if either the blkcg or queue is going away. Fall back to
+	 * root_rl in such cases.
+	 */
+	blkg = blkg_lookup_create(blkcg, q);
+	if (unlikely(IS_ERR(blkg)))
+		goto root_rl;
+
+	blkg_get(blkg);
+	rcu_read_unlock();
+	return &blkg->rl;
+root_rl:
+	rcu_read_unlock();
+	return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+	/* root_rl may not have blkg set */
+	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+		blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+	rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+	return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+					 struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q)	\
+	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
 /**
  * blkg_stat_add - add a value to a blkg_stat
  * @stat: target blkg_stat
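Taken together, blk_get_rl(), blk_rq_set_rl(), blk_rq_rl() and blk_put_rl() let the request allocator pick a per-blkg request_list when a request is allocated and find the same list again when it is freed. A hedged sketch of a caller, not part of the patch: example_get_request(), example_put_request(), example_alloc_from_rl() and example_free_to_rl() are hypothetical names standing in for the real allocation paths, and queue_lock is assumed to be held as the comments above require.

	/* Hypothetical pool helpers, assumed only for the sketch. */
	static struct request *example_alloc_from_rl(struct request_list *rl, gfp_t gfp);
	static void example_free_to_rl(struct request_list *rl, struct request *rq);

	/* Illustrative caller only. */
	static struct request *example_get_request(struct request_queue *q,
						   struct bio *bio, gfp_t gfp)
	{
		struct request_list *rl;
		struct request *rq;

		lockdep_assert_held(q->queue_lock);

		rl = blk_get_rl(q, bio);		/* never returns NULL */
		rq = example_alloc_from_rl(rl, gfp);	/* hypothetical */
		if (!rq) {
			blk_put_rl(rl);			/* drop the blkg ref on failure */
			return NULL;
		}
		blk_rq_set_rl(rq, rl);			/* remember where @rq came from */
		return rq;
	}

	static void example_put_request(struct request *rq)
	{
		struct request_list *rl = blk_rq_rl(rq);

		example_free_to_rl(rl, rq);		/* hypothetical */
		blk_put_rl(rl);				/* pairs with blk_get_rl() */
	}
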
@@ -392,6 +484,7 @@ static inline void blkcg_deactivate_policy(struct request_queue *q,
 
 static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
 						   struct blkcg_policy *pol) { return NULL; }
 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
@@ -399,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
 static inline void blkg_get(struct blkcg_gq *blkg) { }
 static inline void blkg_put(struct blkcg_gq *blkg) { }
 
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+					      struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q)	\
+	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
 #endif	/* CONFIG_BLK_CGROUP */
 #endif	/* _BLK_CGROUP_H */
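With CONFIG_BLK_CGROUP disabled, the stubs above collapse everything onto q->root_rl, so callers can be written once against this API and behave the same in both configurations. For instance, code that must visit every request_list of a queue (say, to wake sleepers during a drain) can use blk_queue_for_each_rl(); a minimal sketch that only counts the lists, with example_count_rls() being a made-up name:

	/* Illustrative only: walk every request_list hanging off @q. */
	static unsigned int example_count_rls(struct request_queue *q)
	{
		struct request_list *rl;
		unsigned int nr = 0;

		lockdep_assert_held(q->queue_lock);

		blk_queue_for_each_rl(rl, q)
			nr++;	/* sees only root_rl when blk-cgroup is off */

		return nr;
	}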