blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
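
/*
 * Usage sketch (illustration only, not from the original file): a caller
 * accounting one synchronous write against the queued counters would do
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
 *
 * which bumps both BLKIO_STAT_WRITE and BLKIO_STAT_SYNC, so the same event
 * shows up in the per-direction and the per-sync breakdowns.
 */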

/*
 * Decrements the appropriate stat variable if non-zero, depending on the
 * request type. BUGs if the counter is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This avoids cases where superfluous timeslice-complete
	 * events (e.g., forced_dispatch in CFQ) occur when no IOs are
	 * served, which could trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
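
/*
 * Note (added for illustration): "bytes >> 9" converts a byte count to
 * 512-byte sectors, i.e. sectors = bytes / 512. A 4 KiB request therefore
 * adds 8 to stats->sectors, 1 to the SERVICED counters and 4096 to the
 * SERVICE_BYTES counters for its direction/sync buckets.
 */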

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
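
/*
 * Timeline sketch (added note): a request is queued at start_time,
 * dispatched to the device at io_start_time and completes at "now", so
 *
 *	wait_time    = io_start_time - start_time	(time spent queued)
 *	service_time = now - io_start_time		(time on the device)
 *
 * The time_after64() checks simply guard against clock skew making either
 * delta go negative.
 */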

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
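
/*
 * Usage sketch (hypothetical, not part of this file): an IO scheduler that
 * creates one blkio_group per (cgroup, device) pair might do something like
 *
 *	blkio_group_init(blkg);
 *	blkiocg_add_blkio_group(blkcg, blkg, (void *)my_queue_data,
 *				MKDEV(major, minor));
 *
 * where "my_queue_data" is whatever per-device key the policy later passes
 * back to blkiocg_lookup_group().
 */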

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
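
/*
 * Usage sketch (hypothetical caller): the lookup must run inside an RCU
 * read-side critical section because the list is only protected by RCU:
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	if (blkg)
 *		... use blkg while still under rcu_read_lock() ...
 *	rcu_read_unlock();
 */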

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
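
/*
 * For reference (added note): SHOW_FUNCTION(weight) expands to a function
 * named blkiocg_weight_read() that returns blkcg->weight as a u64; it is
 * wired up as the .read_u64 handler of the "weight" cgroup file below.
 */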

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
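
/*
 * From userspace (illustration; the path depends on where the cgroup fs is
 * mounted), this handler runs when the weight file is written, e.g.:
 *
 *	# echo 500 > /cgroup/blkio/mygroup/blkio.weight
 *
 * Values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected with
 * -EINVAL, and every registered policy is told the new weight for each of
 * the cgroup's blkio_groups.
 */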

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
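
/*
 * Output sketch (illustrative values): for a per-sub-type stat such as
 * io_service_bytes on device 8:16, the map callback emits lines like
 *
 *	8:16 Read 131072
 *	8:16 Write 65536
 *	8:16 Sync 98304
 *	8:16 Async 98304
 *	8:16 Total 196608
 *
 * where Total is Read + Write; Sync/Async partition the same bytes the
 * other way, so both pairs sum to the same Total.
 */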

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
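
/*
 * For reference (added note): each instantiation above generates one
 * blkiocg_<name>_read() map handler; e.g. SHOW_FUNCTION_PER_GROUP(io_queued,
 * BLKIO_STAT_QUEUED, 1) produces blkiocg_io_queued_read(), which walks every
 * blkio_group in the cgroup under RCU and, because show_total is 1, appends
 * a cgroup-wide "Total" line after the per-device lines.
 */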

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchies deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
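
/*
 * Usage sketch (hypothetical policy; identifiers invented for illustration):
 * an IO scheduler registers itself by filling in the per-policy callbacks
 * and adding itself to blkio_list, typically from its own init/exit paths:
 *
 *	static struct blkio_policy_type blkio_policy_myiosched = {
 *		.ops = {
 *			.blkio_unlink_group_fn = myiosched_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *					myiosched_update_blkio_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_myiosched);     module init
 *	blkio_policy_unregister(&blkio_policy_myiosched);   module exit
 */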

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);

MODULE_LICENSE("GPL");