/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
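
/*
 * Note: .use_id = 1 makes the cgroup core assign a css_id to every blkio
 * cgroup. blkiocg_add_blkio_group() below records that id in blkg->blkcg_id,
 * and blkiocg_del_blkio_group() later resolves it back with css_lookup(),
 * so a blkio_group can find its cgroup without holding a direct reference.
 */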
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}
	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
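
/*
 * Each operation is thus accounted twice: once on the read/write axis and
 * once on the sync/async axis, so READ + WRITE == SYNC + ASYNC holds for
 * any stat array updated exclusively through blkio_add_stat().
 */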
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if a cfqq got a
	 * new request in the parent group and was moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
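
/*
 * In the above, start_time is when the request was queued and io_start_time
 * is when it was dispatched to the device, so SERVICE_TIME accumulates the
 * on-device time (completion - dispatch) and WAIT_TIME the queuing delay
 * (dispatch - queue), all measured with sched_clock().
 */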
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
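
/*
 * Illustrative usage (a sketch, not code from this file): an IO policy
 * typically keys its blkio_groups by its own per-queue data and resolves
 * the group for the current task under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	blkcg = cgroup_to_blkio_cgroup(task_cgroup(current,
 *						   blkio_subsys_id));
 *	blkg = blkiocg_lookup_group(blkcg, cfqd);
 *	rcu_read_unlock();
 *
 * where cfqd stands in for the policy's private key.
 */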
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
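
/*
 * Note that groups with a per-device policy node are skipped above: a rule
 * set via blkio.weight_device overrides the cgroup-wide blkio.weight, so
 * only groups without such a rule are updated to the new default.
 */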
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
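
/*
 * The QUEUED counters are deliberately saved and restored around the
 * memset(): requests still sitting in the queue will be decremented on
 * completion via blkio_check_and_dec_stat(), which BUG()s on underflow,
 * so those counts must survive a stats reset. In-flight debug states
 * (idling/waiting/empty) are restarted at the current clock for the same
 * reason.
 */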
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
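
/*
 * For a disk with dev_t 8:16 this yields keys such as "8:16 Read",
 * "8:16 Write", "8:16 Sync", "8:16 Async" and "8:16 Total", or just
 * "8:16" when diskname_only is set (used for single-value stats).
 */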
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent parsing more fields than expected */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}
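
/*
 * The accepted input is "<major>:<minor> <weight>" for a whole disk, e.g.
 *
 *	echo "8:16 300" > blkio.weight_device
 *
 * sets weight 300 for device 8:16 in this cgroup. A weight of 0 is valid
 * and means "remove the per-device rule" (see blkiocg_weight_device_write()
 * below); partitions are rejected by blkio_check_dev_num().
 */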
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
					 newpn->weight ?
					 newpn->weight :
					 blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);
out:
	return 0;
}
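
/*
 * Reading blkio.weight_device therefore produces, for the rule set in the
 * earlier example:
 *
 *	dev	weight
 *	8:16	300
 */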
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one io controlling policy. Once
	 * we have more policies in place, we will need some dynamic
	 * registration of callback function.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);

	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
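
/*
 * A minimal sketch (not code from this file) of how an IO scheduler such as
 * CFQ hooks in; foo_unlink_group() and foo_update_weight() are hypothetical
 * callbacks matching the two ops used above:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_group,
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	(at init)
 *	blkio_policy_unregister(&blkio_policy_foo);	(at exit)
 */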
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");