blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include "blk-cgroup.h"
#include "cfq-iosched.h"

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}

void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
                        unsigned long time, unsigned long sectors)
{
        blkg->time += time;
        blkg->sectors += sectors;
}
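
/*
 * Sketch of the intended caller of the stat hook above: after a queue's
 * time slice is used up, the IO policy charges the owning group for the
 * service it received. example_charge() and its parameter names are
 * hypothetical, for illustration only; the real caller lives in
 * cfq-iosched.c. Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_charge(struct blkio_group *blkg,
                        unsigned long used_jiffies, unsigned long nr_sectors)
{
        /* Accumulates into blkg->time and blkg->sectors. */
        blkiocg_update_blkio_group_stats(blkg, used_jiffies, nr_sectors);
}
#endif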

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
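
/*
 * Sketch of how an IO policy is expected to use the registration hook
 * above: it embeds a struct blkio_group in its own per-cgroup object and
 * passes an opaque per-device cookie as the key (CFQ passes its cfq_data
 * pointer). struct example_group and example_add_group() are hypothetical
 * names for illustration; the real caller lives in cfq-iosched.c.
 */
#if 0
struct example_group {
        struct blkio_group blkg;        /* generic group state, embedded */
        unsigned int new_weight;        /* policy-private state */
};

static void example_add_group(struct blkio_cgroup *blkcg,
                        struct example_group *eg, void *per_device_key,
                        dev_t dev)
{
        /* Publishes the key and links eg->blkg on blkcg's group list. */
        blkiocg_add_blkio_group(blkcg, &eg->blkg, per_device_key, dev);
}
#endif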

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was already unhashed by the
 * time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
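
/*
 * Sketch of the calling convention for the lookup above: the caller holds
 * rcu_read_lock() across both the lookup and any use of the returned
 * group, since the group may be freed once the RCU section ends.
 * example_group_time() and per_device_key are hypothetical names.
 */
#if 0
static unsigned long example_group_time(struct blkio_cgroup *blkcg,
                        void *per_device_key)
{
        struct blkio_group *blkg;
        unsigned long time = 0;

        rcu_read_lock();
        blkg = blkiocg_lookup_group(blkcg, per_device_key);
        if (blkg)
                time = blkg->time;      /* only valid inside the section */
        rcu_read_unlock();

        return time;
}
#endif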

#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                        struct cftype *cftype)                          \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}
SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                cfq_update_blkio_group_weight(blkg, blkcg->weight);
        spin_unlock_irq(&blkcg->lock);
        return 0;
}
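
/*
 * Sketch of the per-policy weight callback contract implied by the write
 * handler above: the policy is told the new weight for each of its groups
 * while blkcg->lock is held with interrupts disabled, so the callback must
 * not sleep. example_update_weight() is a hypothetical stand-in for
 * cfq_update_blkio_group_weight(), reusing struct example_group from the
 * sketch after blkiocg_add_blkio_group().
 */
#if 0
static void example_update_weight(struct blkio_group *blkg,
                        unsigned int weight)
{
        struct example_group *eg = container_of(blkg, struct example_group,
                                                blkg);

        /* Record the new weight; picked up on the next scheduling round. */
        eg->new_weight = weight;
}
#endif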

#define SHOW_FUNCTION_PER_GROUP(__VAR)                                  \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                        struct cftype *cftype, struct seq_file *m)      \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) { \
                if (blkg->dev)                                          \
                        seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),  \
                                   MINOR(blkg->dev), blkg->__VAR);      \
        }                                                               \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}
SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
{
        blkg->dequeue += dequeue;
}
#endif

struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_seq_string = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_seq_string = blkiocg_sectors_read,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
                .read_seq_string = blkiocg_dequeue_read,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                           blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);
        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked because the associated cgroup
         * is going away. Let all the IO controlling policies know about
         * this event.
         *
         * Currently this is a static call into the one IO controlling
         * policy (CFQ). Once we have more policies in place, we will need
         * some dynamic registration of a callback function (a sketch of
         * such registration follows this function).
         */
        cfq_unlink_blkio_group(key, blkg);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        kfree(blkcg);
}
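
/*
 * Sketch of the dynamic policy registration the comment above calls for:
 * policies would register an ops structure on a global list, and the
 * unlink in blkiocg_destroy() would iterate that list instead of calling
 * CFQ directly. All names here (blkio_policy_ops, blkio_policy_node,
 * blkio_policy_register) are hypothetical; nothing like this exists in
 * this file yet.
 */
#if 0
struct blkio_policy_ops {
        void (*blkio_unlink_group_fn)(void *key, struct blkio_group *blkg);
        void (*blkio_update_group_weight_fn)(struct blkio_group *blkg,
                                             unsigned int weight);
};

struct blkio_policy_node {
        struct list_head node;
        struct blkio_policy_ops ops;
};

static LIST_HEAD(blkio_policy_list);
static DEFINE_SPINLOCK(blkio_policy_list_lock);

static void blkio_policy_register(struct blkio_policy_node *pn)
{
        spin_lock(&blkio_policy_list_lock);
        list_add_tail(&pn->node, &blkio_policy_list);
        spin_unlock(&blkio_policy_list_lock);
}
#endif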

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support a hierarchy deeper than two levels (0, 1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                        struct cgroup *cgroup, struct task_struct *tsk,
                        bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                        struct cgroup *prev, struct task_struct *tsk,
                        bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
        .subsys_id = blkio_subsys_id,
        .use_id = 1,
};
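
/*
 * Usage sketch (from the shell, not part of this file): the controller is
 * driven entirely through the cftype files registered above. The mount
 * point and the weight value are illustrative; the accepted weight range
 * is BLKIO_WEIGHT_MIN..BLKIO_WEIGHT_MAX, as enforced in
 * blkiocg_weight_write(), and blkio.time prints "major:minor time" per
 * device, as produced by SHOW_FUNCTION_PER_GROUP().
 *
 *   # mount -t cgroup -o blkio none /cgroup
 *   # mkdir /cgroup/test1
 *   # echo 1000 > /cgroup/test1/blkio.weight
 *   # cat /cgroup/test1/blkio.time
 */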