blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum stat_type {
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/*
	 * Total time spent (in ns) between request dispatch to the driver
	 * and request completion for IOs done by this cgroup.  This may not
	 * be accurate when NCQ is turned on.
	 */
	BLKIO_STAT_SERVICE_TIME,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,

	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Types lower than this live in stat_arr and have subtypes */
#define BLKIO_STAT_ARR_NR	(BLKIO_STAT_QUEUED + 1)

/* Per cpu stats */
enum stat_type_cpu {
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,

	/* All the single valued stats go below this */
	BLKIO_STAT_CPU_SECTORS,
};

#define BLKIO_STAT_CPU_ARR_NR	(BLKIO_STAT_CPU_SERVICED + 1)

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};
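
/*
 * Illustrative sketch, not part of this interface: the embedded css ties a
 * blkio_cgroup into the cgroup core, so a helper such as
 * cgroup_to_blkio_cgroup() (declared below) can recover the container with
 * container_of().  The helper name css_to_blkcg() here is hypothetical:
 *
 *	static inline struct blkio_cgroup *
 *	css_to_blkcg(struct cgroup_subsys_state *css)
 *	{
 *		return container_of(css, struct blkio_cgroup, css);
 *	}
 */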

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* total sectors transferred */
	struct blkg_stat		sectors;
};

struct blkio_group_conf {
	unsigned int			weight;
	unsigned int			iops[2];
	u64				bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group		*blkg;

	/* Configuration */
	struct blkio_group_conf		conf;

	struct blkio_group_stats	stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu	__percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char				pdata[] __aligned(__alignof__(unsigned long long));
};
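
/*
 * Illustrative sketch, not part of this interface: a policy keeps its
 * private state as an ordinary struct and advertises its size via
 * blkio_policy_type.pdata_size; the core then sizes the flexible pdata[]
 * array to hold it, roughly sizeof(struct blkg_policy_data) +
 * pol->pdata_size per policy.  "struct my_pol_data" is a hypothetical
 * example type, retrieved via blkg_to_pdata() below:
 *
 *	struct my_pol_data {
 *		unsigned int	nr_queued;
 *	};
 *
 *	struct my_pol_data *mpd = blkg_to_pdata(blkg, &my_policy);
 */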

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkio_cgroup		*blkcg;
	/* Store cgroup path */
	char				path[128];
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head		alloc_node;
	struct rcu_head			rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);
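
/*
 * Illustrative sketch, not part of this interface: a policy fills in
 * blkio_policy_ops, picks its policy id and per-blkg private data size,
 * and registers itself, typically from module init.  All "my_*" names are
 * hypothetical and build on the my_pol_data example above:
 *
 *	static struct blkio_policy_type my_policy;
 *
 *	static void my_init_group(struct blkio_group *blkg)
 *	{
 *		struct my_pol_data *mpd = blkg_to_pdata(blkg, &my_policy);
 *
 *		mpd->nr_queued = 0;
 *	}
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops		= { .blkio_init_group_fn = my_init_group },
 *		.plid		= BLKIO_POLICY_PROP,
 *		.pdata_size	= sizeof(struct my_pol_data),
 *	};
 *
 *	blkio_policy_register(&my_policy);
 *	...
 *	blkio_policy_unregister(&my_policy);
 */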

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
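
/*
 * Illustrative sketch: blkg_to_pdata() and pdata_to_blkg() are inverses,
 * letting a policy hop between a blkg and its private data.  Continuing
 * the hypothetical my_policy example:
 *
 *	struct my_pol_data *mpd = blkg_to_pdata(blkg, &my_policy);
 *	struct blkio_group *same = pdata_to_blkg(mpd);
 *
 * after which same == blkg.
 */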

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
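
/*
 * Illustrative sketch: both helpers expect queue_lock to be held, and
 * blkg_get() additionally needs an existing reference, e.g. the one
 * implied by the blkg still being reachable through blkg_lookup():
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	... use blkg ...
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 */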

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
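
/*
 * Illustrative sketch: writers to a blkg_stat serialize among themselves
 * (typically under queue_lock) while readers need no lock thanks to the
 * u64_stats sequence counter:
 *
 *	blkg_stat_add(&stats->time, used_ns);		(writer, locked)
 *	total = blkg_stat_read(&stats->time);		(reader, lockless)
 */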

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
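
/*
 * Illustrative sketch: @rw carries the direction and sync bits of the
 * request, so a completion path could account a synchronous write as:
 *
 *	blkg_rwstat_add(&stats->service_time, REQ_WRITE | REQ_SYNC,
 *			now_ns - io_start_ns);
 *
 * where now_ns and io_start_ns are hypothetical timestamps.
 */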

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
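
/*
 * Illustrative sketch: a stats file can take one consistent snapshot and
 * pick the counters apart, or use blkg_rwstat_sum() when only the grand
 * total matters:
 *
 *	struct blkg_rwstat tmp = blkg_rwstat_read(&stats->queued);
 *	uint64_t reads = tmp.cnt[BLKG_RWSTAT_READ];
 *	uint64_t writes = tmp.cnt[BLKG_RWSTAT_WRITE];
 *	uint64_t total = blkg_rwstat_sum(&stats->queued);
 */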

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
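
/*
 * Illustrative sketch: each BLKG_FLAG_FNS() invocation above generates
 * three helpers; for "waiting" they are blkio_mark_blkg_waiting(),
 * blkio_clear_blkg_waiting() and blkio_blkg_waiting(), used like:
 *
 *	if (!blkio_blkg_waiting(stats))
 *		blkio_mark_blkg_waiting(stats);
 */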

#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
			struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif

#endif /* _BLK_CGROUP_H */