blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
        spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                           bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}
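
/*
 * Illustrative example (added commentary, not part of the original file):
 * every event lands in exactly two buckets, one for direction and one for
 * synchronicity. Accounting a synchronous 4096-byte write into the
 * SERVICE_BYTES array would be:
 *
 *      blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], 4096,
 *                     true, true);
 *
 * which bumps both BLKIO_STAT_WRITE and BLKIO_STAT_SYNC by 4096, so the
 * Read+Write sum and the Sync+Async sum each yield the same grand total.
 */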
/*
 * Decrements the appropriate stat variable depending on the request type.
 * Panics if the value is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#endif
void blkiocg_update_request_add_stats(struct blkio_group *blkg,
                        struct blkio_group *curr_blkg, bool direction,
                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                        sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
                                                bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->sectors += bytes >> 9;   /* convert bytes to 512-byte sectors */
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
                        sync);
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                                io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
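
/*
 * Request timeline assumed by blkiocg_update_completion_stats() above
 * (sketch, added commentary):
 *
 *      start_time           io_start_time             now
 *      (request queued) --> (dispatched to driver) -> (completed)
 *      |----- WAIT_TIME ----|------ SERVICE_TIME -----|
 *
 * The time_after64() checks simply skip a sample whenever the interval
 * would come out negative.
 */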
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
                        sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
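
/*
 * Lookup sketch (hypothetical caller, added commentary): the same opaque
 * key passed to blkiocg_add_blkio_group() finds the group again, and the
 * result is only valid while the RCU read-side section is held:
 *
 *      rcu_read_lock();
 *      blkg = blkiocg_lookup_group(blkcg, my_qdata);
 *      if (blkg)
 *              blkiocg_update_timeslice_used(blkg, slice_used);
 *      rcu_read_unlock();
 *
 * (my_qdata and slice_used are placeholders for a policy's own state.)
 */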
#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                  struct cftype *cftype)                \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
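
/*
 * For reference (added commentary), SHOW_FUNCTION(weight) expands to:
 *
 *      static u64 blkiocg_weight_read(struct cgroup *cgroup,
 *                                     struct cftype *cftype)
 *      {
 *              struct blkio_cgroup *blkcg;
 *
 *              blkcg = cgroup_to_blkio_cgroup(cgroup);
 *              return (u64)blkcg->weight;
 *      }
 *
 * which is the read_u64 handler wired up for "weight" in blkio_files[]
 * below.
 */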
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_update_group_weight_fn(blkg,
                                        blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i];
                memset(&blkg->stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
                spin_unlock(&blkg->stats_lock);
        }
        spin_unlock_irq(&blkcg->lock);
        return 0;
}
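
/*
 * Note (added commentary): the QUEUED counters are deliberately saved and
 * restored across the memset above. Requests already sitting in the queue
 * will still be removed later via blkiocg_update_request_remove_stats(),
 * and blkio_check_and_dec_stat() would hit its BUG_ON() if those counters
 * had been zeroed out from under it.
 */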
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
                               int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                       "Possibly incorrect cgroup stat display format");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}
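
/*
 * Example output (added commentary): for dev = MKDEV(8, 16) and
 * type = BLKIO_STAT_READ this builds the key "8:16 Read"; with
 * diskname_only set it stops at "8:16". These keys are what userspace
 * sees in files such as blkio.io_service_bytes.
 */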
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, dev_t dev)
{
        blkio_get_key_name(0, dev, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.time, cb, dev);
        if (type == BLKIO_STAT_SECTORS)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
        }
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.dequeue, cb, dev);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)                \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                struct cftype *cftype, struct cgroup_map_cb *cb)        \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
        uint64_t cgroup_total = 0;                                      \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev) {                                        \
                        spin_lock_irq(&blkg->stats_lock);               \
                        cgroup_total += blkio_get_stat(blkg, cb,        \
                                                blkg->dev, type);       \
                        spin_unlock_irq(&blkg->stats_lock);             \
                }                                                       \
        }                                                               \
        if (show_total)                                                 \
                cb->fill(cb, "Total", cgroup_total);                    \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
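
/*
 * Reading one of the generated files emits one "major:minor <sub-stat>
 * <value>" pair per device, plus an overall Total when show_total is set.
 * A hypothetical blkio.io_serviced read for a single disk might look like
 * (values invented for illustration):
 *
 *      8:16 Read 131072
 *      8:16 Write 65536
 *      8:16 Sync 131072
 *      8:16 Async 65536
 *      8:16 Total 196608
 *      Total 196608
 */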
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif
struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_map = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_map = blkiocg_sectors_read,
        },
        {
                .name = "io_service_bytes",
                .read_map = blkiocg_io_service_bytes_read,
        },
        {
                .name = "io_serviced",
                .read_map = blkiocg_io_serviced_read,
        },
        {
                .name = "io_service_time",
                .read_map = blkiocg_io_service_time_read,
        },
        {
                .name = "io_wait_time",
                .read_map = blkiocg_io_wait_time_read,
        },
        {
                .name = "io_merged",
                .read_map = blkiocg_io_merged_read,
        },
        {
                .name = "io_queued",
                .read_map = blkiocg_io_queued_read,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .read_map = blkiocg_avg_queue_size_read,
        },
        {
                .name = "dequeue",
                .read_map = blkiocg_dequeue_read,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}
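
/*
 * Userspace view (sketch, added commentary): the cgroup core exposes each
 * entry above as "blkio.<name>"; mount point and group name below are
 * examples only:
 *
 *      # mount -t cgroup -o blkio none /cgroup
 *      # mkdir /cgroup/grp1
 *      # echo 500 > /cgroup/grp1/blkio.weight
 *      # cat /cgroup/grp1/blkio.io_serviced
 *      # echo 1 > /cgroup/grp1/blkio.reset_stats
 */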
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked as the associated cgroup is
         * going away. Let all the IO controlling policies know about this
         * event.
         *
         * Currently this is a static call to one IO controlling policy.
         * Once we have more policies in place, we need some dynamic
         * registration of callback functions.
         */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support a hierarchy deeper than two levels (0,1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
        return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                                struct cgroup *cgroup, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                                struct cgroup *prev, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
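
/*
 * Registration sketch (hypothetical policy, names invented): a scheduler
 * fills in a blkio_policy_type with the callbacks this file invokes
 * (blkio_unlink_group_fn from blkiocg_destroy(), and
 * blkio_update_group_weight_fn from blkiocg_weight_write()), then
 * registers it at init time:
 *
 *      static struct blkio_policy_type my_policy = {
 *              .ops = {
 *                      .blkio_unlink_group_fn = my_unlink_group,
 *                      .blkio_update_group_weight_fn = my_update_weight,
 *              },
 *      };
 *
 *      blkio_policy_register(&my_policy);      on module init
 *      blkio_policy_unregister(&my_policy);    on module exit
 */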
static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);

MODULE_LICENSE("GPL");