blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
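
/* Map a generic cgroup to the blkio-specific state embedded in its css. */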
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
{
        if (flags & REQ_RW)
                stat[IO_WRITE] += add;
        else
                stat[IO_READ] += add;
        /*
         * Everywhere in the block layer, an IO is treated as sync if it is a
         * read or a SYNC write. We follow the same norm.
         */
        if (!(flags & REQ_RW) || flags & REQ_RW_SYNC)
                stat[IO_SYNC] += add;
        else
                stat[IO_ASYNC] += add;
}
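
/*
 * Charge device time used by the group to its "time" stat. The value is
 * accumulated in jiffies and converted to nanoseconds when read back
 * (see GET_STAT(time, 1) below); the caller is expected to be the IO
 * scheduler's timeslice accounting.
 */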
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
                                           struct request *rq)
{
        struct blkio_group_stats *stats;
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->sectors += blk_rq_sectors(rq);
        io_add_stat(stats->io_serviced, 1, rq->cmd_flags);
        io_add_stat(stats->io_service_bytes, blk_rq_sectors(rq) << 9,
                    rq->cmd_flags);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
/* Exported like the other stat helpers so a modular IO scheduler can call it. */
EXPORT_SYMBOL_GPL(blkiocg_update_request_dispatch_stats);
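
/*
 * Account timing for a completed request:
 *   io_service_time += completion time - dispatch time (io_start_time_ns)
 *   io_wait_time    += dispatch time  - queueing time  (start_time_ns)
 * Both are bucketed by direction and sync/async via io_add_stat().
 */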
void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
                                             struct request *rq)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, rq->io_start_time_ns))
                io_add_stat(stats->io_service_time, now - rq->io_start_time_ns,
                            rq->cmd_flags);
        if (time_after64(rq->io_start_time_ns, rq->start_time_ns))
                io_add_stat(stats->io_wait_time,
                            rq->io_start_time_ns - rq->start_time_ns,
                            rq->cmd_flags);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_completion_stats);
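
/*
 * Link a blkio_group into the cgroup's list. 'key' is an opaque per-device
 * cookie supplied by the IO policy (e.g. the scheduler's per-queue private
 * data) and is what blkiocg_lookup_group() matches on later.
 */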
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                             struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
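
/*
 * SHOW_FUNCTION(v) generates a read_u64 handler that returns the plain
 * per-cgroup field blkcg->v; only "weight" uses it at the moment.
 */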
#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                  struct cftype *cftype)                \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_update_group_weight_fn(blkg,
                                                                 blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}
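
/*
 * Writing any value to one of the stat files clears every stat of every
 * blkio_group in the cgroup; the same handler is wired up as .write_u64
 * for all of them in blkio_files[] below.
 */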
static int
blkiocg_reset_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_group_stats *stats;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                stats = &blkg->stats;
                memset(stats, 0, sizeof(struct blkio_group_stats));
                spin_unlock(&blkg->stats_lock);
        }
        spin_unlock_irq(&blkcg->lock);
        return 0;
}
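
/*
 * Build the key printed for one stat entry: "<major>:<minor> <Type>",
 * e.g. "8:16 Read". IO_TYPE_MAX is reused to label the per-device total.
 */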
void get_key_name(int type, char *disk_id, char *str, int chars_left)
{
        strlcpy(str, disk_id, chars_left);
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                       "Possibly incorrect cgroup stat display format.\n");
                return;
        }

        switch (type) {
        case IO_READ:
                strlcat(str, " Read", chars_left);
                break;
        case IO_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case IO_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case IO_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case IO_TYPE_MAX:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}
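
/*
 * Helpers for the read_map handlers generated below. get_typed_stat()
 * emits one entry per direction/sync class plus a per-device total;
 * get_stat() emits a single unclassified value keyed by the device id.
 */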
typedef uint64_t (get_var) (struct blkio_group *, int);

#define MAX_KEY_LEN 100

uint64_t get_typed_stat(struct blkio_group *blkg, struct cgroup_map_cb *cb,
                        get_var *getvar, char *disk_id)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        int type;

        for (type = 0; type < IO_TYPE_MAX; type++) {
                get_key_name(type, disk_id, key_str, MAX_KEY_LEN);
                cb->fill(cb, key_str, getvar(blkg, type));
        }
        disk_total = getvar(blkg, IO_READ) + getvar(blkg, IO_WRITE);
        get_key_name(IO_TYPE_MAX, disk_id, key_str, MAX_KEY_LEN);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

uint64_t get_stat(struct blkio_group *blkg, struct cgroup_map_cb *cb,
                  get_var *getvar, char *disk_id)
{
        uint64_t var = getvar(blkg, 0);

        cb->fill(cb, disk_id, var);
        return var;
}
#define GET_STAT_INDEXED(__VAR)                                         \
uint64_t get_##__VAR##_stat(struct blkio_group *blkg, int type)         \
{                                                                       \
        return blkg->stats.__VAR[type];                                 \
}

GET_STAT_INDEXED(io_service_bytes);
GET_STAT_INDEXED(io_serviced);
GET_STAT_INDEXED(io_service_time);
GET_STAT_INDEXED(io_wait_time);
#undef GET_STAT_INDEXED

#define GET_STAT(__VAR, __CONV)                                         \
uint64_t get_##__VAR##_stat(struct blkio_group *blkg, int dummy)        \
{                                                                       \
        uint64_t data = blkg->stats.__VAR;                              \
        if (__CONV)                                                     \
                data = (uint64_t)jiffies_to_msecs(data) * NSEC_PER_MSEC;\
        return data;                                                    \
}

GET_STAT(time, 1);
GET_STAT(sectors, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
GET_STAT(dequeue, 0);
#endif
#undef GET_STAT
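
/*
 * SHOW_FUNCTION_PER_GROUP() generates the read_map handler for a stat file:
 * it walks every blkio_group of the cgroup under rcu_read_lock(), prints the
 * group's stats keyed by "major:minor" (holding stats_lock), and optionally
 * appends a cgroup-wide "Total".
 */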
#define SHOW_FUNCTION_PER_GROUP(__VAR, get_stats, getvar, show_total)   \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                struct cftype *cftype, struct cgroup_map_cb *cb)        \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
        uint64_t cgroup_total = 0;                                      \
        char disk_id[10];                                               \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev) {                                        \
                        spin_lock_irq(&blkg->stats_lock);               \
                        snprintf(disk_id, 10, "%u:%u", MAJOR(blkg->dev),\
                                 MINOR(blkg->dev));                     \
                        cgroup_total += get_stats(blkg, cb, getvar,     \
                                                  disk_id);             \
                        spin_unlock_irq(&blkg->stats_lock);             \
                }                                                       \
        }                                                               \
        if (show_total)                                                 \
                cb->fill(cb, "Total", cgroup_total);                    \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time, get_stat, get_time_stat, 0);
SHOW_FUNCTION_PER_GROUP(sectors, get_stat, get_sectors_stat, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, get_typed_stat,
                        get_io_service_bytes_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, get_typed_stat, get_io_serviced_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, get_typed_stat,
                        get_io_service_time_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, get_typed_stat, get_io_wait_time_stat, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, get_stat, get_dequeue_stat, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif
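
/*
 * Control files exposed in each blkio cgroup directory. "weight" is
 * read/write; the stat files are read_map and reuse blkiocg_reset_write
 * so that writing to them clears the counters.
 */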
struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_map = blkiocg_time_read,
                .write_u64 = blkiocg_reset_write,
        },
        {
                .name = "sectors",
                .read_map = blkiocg_sectors_read,
                .write_u64 = blkiocg_reset_write,
        },
        {
                .name = "io_service_bytes",
                .read_map = blkiocg_io_service_bytes_read,
                .write_u64 = blkiocg_reset_write,
        },
        {
                .name = "io_serviced",
                .read_map = blkiocg_io_serviced_read,
                .write_u64 = blkiocg_reset_write,
        },
        {
                .name = "io_service_time",
                .read_map = blkiocg_io_service_time_read,
                .write_u64 = blkiocg_reset_write,
        },
        {
                .name = "io_wait_time",
                .read_map = blkiocg_io_wait_time_read,
                .write_u64 = blkiocg_reset_write,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
                .read_map = blkiocg_dequeue_read,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}
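
/*
 * Tear down a cgroup: unlink every blkio_group from the cgroup's list, let
 * each registered policy drop its state via blkio_unlink_group_fn(), then
 * free the blkio_cgroup (unless it is the statically allocated root).
 */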
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                           blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked as associated cgroup is going
         * away. Let all the IO controlling policies know about this event.
         *
         * Currently this is static call to one io controlling policy. Once
         * we have more policies in place, we need some dynamic registration
         * of callback function.
         */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}
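
/*
 * Allocate per-cgroup state. The root cgroup uses the statically allocated
 * blkio_root_cgroup; children get a fresh blkio_cgroup with the default
 * weight. Only one level below the root is allowed for now.
 */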
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support hierarchy deeper than two level (0,1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                              struct cgroup *cgroup, struct task_struct *tsk,
                              bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                           struct cgroup *prev, struct task_struct *tsk,
                           bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}
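
/*
 * IO control policies register here to receive weight-update and
 * group-unlink callbacks through the ops in struct blkio_policy_type.
 */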
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
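
/*
 * The blkio controller is registered and unregistered via
 * cgroup_load_subsys()/cgroup_unload_subsys(), allowing it to be built
 * as a loadable module.
 */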
static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");