#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes the pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
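
/*
 * Illustrative sketch, not part of this header: how a policy might lay out
 * its per-blkg private data (embedding struct blkg_policy_data first, as
 * required above) and describe itself to the blkcg core.  The names
 * my_pd, my_pd_init, my_files and my_policy are hypothetical.
 *
 *	struct my_pd {
 *		struct blkg_policy_data	pd;		// must come first
 *		struct blkg_rwstat	serviced;	// bytes serviced, by direction
 *		struct blkg_stat	time;		// time accounted to this group
 *	};
 *
 *	static void my_pd_init(struct blkcg_gq *blkg)
 *	{
 *		// initialize the fields above; conversion helpers are
 *		// sketched near blkg_to_pd()/pd_to_blkg() below
 *	}
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_size	= sizeof(struct my_pd),
 *		.cftypes	= my_files,
 *		.pd_init_fn	= my_pd_init,
 *	};
 */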

extern struct blkcg blkcg_root;

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
struct blkcg *bio_blkcg(struct bio *bio);
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
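
/*
 * Illustrative sketch (an assumption, not taken from this header): a policy
 * is typically registered once at module init and then activated per
 * request_queue before its blkgs carry valid private data.  my_policy and
 * my_init are hypothetical names.
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 *	// later, for a particular device queue:
 *	//	ret = blkcg_activate_policy(q, &my_policy);
 *	// and on teardown:
 *	//	blkcg_deactivate_policy(q, &my_policy);
 */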

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
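
/*
 * Illustrative sketch (an assumption, not part of this header): a per-device
 * configuration write handler usually brackets its update between
 * blkg_conf_prep() and blkg_conf_finish().  my_set_conf, my_policy,
 * blkg_to_mypd and the conf_value field are hypothetical.
 *
 *	static int my_set_conf(struct blkcg *blkcg, const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		struct my_pd *mpd;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ctx.blkg is the blkg for the device named in @buf and
 *		// ctx.v is the value parsed after the device number
 *		mpd = blkg_to_mypd(ctx.blkg);
 *		mpd->conf_value = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */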

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
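
/*
 * Illustrative sketch, not from this header: policies commonly wrap the two
 * helpers above with container_of() to convert between a blkg and their own
 * private data type.  struct my_pd, my_policy, blkg_to_mypd and mypd_to_blkg
 * are hypothetical names.
 *
 *	static inline struct my_pd *blkg_to_mypd(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &my_policy);
 *
 *		return pd ? container_of(pd, struct my_pd, pd) : NULL;
 *	}
 *
 *	static inline struct blkcg_gq *mypd_to_blkg(struct my_pd *mpd)
 *	{
 *		return pd_to_blkg(&mpd->pd);
 *	}
 */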

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
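
/*
 * Illustrative sketch (an assumption): the writer updates the counter under
 * its own serialization (e.g. the queue_lock) while readers may sample it at
 * any time.  mpd is a hypothetical private-data pointer whose "time" field
 * is a blkg_stat, as in the my_pd sketch earlier in this file.
 *
 *	// writer side, serialized by the caller:
 *	blkg_stat_add(&mpd->time, jiffies_delta);
 *
 *	// reader side, no serialization needed:
 *	u64 total = blkg_stat_read(&mpd->time);
 */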

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
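
/*
 * Illustrative sketch (an assumption): accounting a bio against a rwstat and
 * later reading both the per-direction split and the total.  mpd is a
 * hypothetical private-data pointer whose "serviced" field is a blkg_rwstat,
 * as in the my_pd sketch earlier in this file.
 *
 *	// writer side, serialized by the caller:
 *	blkg_rwstat_add(&mpd->serviced, bio->bi_rw, bio->bi_size);
 *
 *	// reader side, no serialization needed:
 *	struct blkg_rwstat snap = blkg_rwstat_read(&mpd->serviced);
 *	u64 reads = snap.cnt[BLKG_RWSTAT_READ];
 *	u64 total = blkg_rwstat_sum(&mpd->serviced);
 */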

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */