blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
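
/*
 * Illustrative sketch (editorial addition, not in the original file):
 * cft->private packs the policy id into the high 16 bits and the file
 * id into the low 16 bits, so the two decode macros invert the encode:
 *
 *	int priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
 *				     BLKIO_PROP_weight_device);
 *	BUG_ON(BLKIOFILE_POLICY(priv) != BLKIO_POLICY_PROP);
 *	BUG_ON(BLKIOFILE_ATTR(priv) != BLKIO_PROP_weight_device);
 */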

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg, weight);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
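
/*
 * Example (editorial illustration): accounting one synchronous write
 * bumps both the WRITE and the SYNC slot of the given stat array,
 * leaving READ and ASYNC untouched:
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1,
 *		       true, true);	// direction=write, sync=true
 */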

/*
 * Decrements the appropriate stat variable for the request type;
 * BUGs if the value to decrement is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
		       sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
		       direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
		       sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was already unhashed by
 * the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
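
/*
 * Example output (editorial illustration): for dev_t 8:16 and type
 * BLKIO_STAT_READ this builds the key "8:16 Read"; with diskname_only
 * set it stops after "8:16".
 */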

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
		     blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
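
/*
 * Example (editorial illustration): reading a per-type map file such
 * as blkio.io_service_bytes for a group on device 8:16 emits one
 * key/value pair per sub-type plus a per-disk total, roughly:
 *
 *	8:16 Read	1310720
 *	8:16 Write	4096
 *	8:16 Sync	1314816
 *	8:16 Async	0
 *	8:16 Total	1314816
 */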

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));
	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;
		/* Prevent the user from supplying too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->weight = temp;
		break;
	default:
		BUG();
	}

	return 0;
}
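
/*
 * Example input (editorial illustration): the string "8:16 500" parses
 * to major=8, minor=16, is checked to name a whole disk, and sets
 * newpn->weight = 500. "8:16 0" parses the same way; a weight of 0 is
 * later treated as a request to delete the rule.
 */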

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

/* Checks whether the user asked to delete a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->weight == 0)
			return 1;
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->weight = newpn->weight;
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to the
 * respective policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->weight ? pn->weight :
			 blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by it.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
					  struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
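
/*
 * Usage sketch (editorial illustration) from userspace; the mount
 * point /cgroup/blkio is an assumption, any blkio cgroup mount works:
 *
 *	# set a per-device weight of 300 for device 8:16
 *	echo "8:16 300" > /cgroup/blkio/grp1/blkio.weight_device
 *	# delete the rule again (weight 0 means delete)
 *	echo "8:16 0" > /cgroup/blkio/grp1/blkio.weight_device
 */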

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->weight);
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						       type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
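
/*
 * Example (editorial illustration): writing 500 to blkio.weight sets
 * the cgroup-wide default and pushes it to every group whose device
 * has no blkio.weight_device override; groups covered by a per-device
 * rule keep the rule's weight.
 */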

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
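
/*
 * Each entry above becomes a blkio.<name> file in every cgroup
 * directory once blkiocg_populate() runs, e.g. (editorial
 * illustration, mount point assumed):
 *
 *	/cgroup/blkio/grp1/blkio.weight
 *	/cgroup/blkio/grp1/blkio.weight_device
 *	/cgroup/blkio/grp1/blkio.io_service_bytes
 *	...
 */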

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event. Currently this is static call to one io
		 * controlling policy. Once we have more policies in place, we
		 * need some dynamic registration of callback function.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}
	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	if (parent != cgroup->top_cgroup)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means of
 * supporting two tasks with the same ioc in two different groups
 * without major rework of the main cic data structures. For now we
 * allow a task to change its cgroup only if it's the only owner of
 * its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");