/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
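
/*
 * Add a per-device rule to the cgroup's policy list. Like the delete and
 * search helpers below, callers are expected to hold blkcg->lock.
 */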
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
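
/*
 * Tell the policy that owns this blkio_group (CFQ's proportional-weight
 * policy, for instance) that the group's weight has changed.
 */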
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
								 blkg, weight);
	}
}
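
/*
 * Propagate a read or write bytes-per-second limit to the throttling
 * policy that owns this group; fileid selects the read or write callback.
 */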
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								    blkg, bps);
	}
}
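
/*
 * Same as above, but for a read or write IO-operations-per-second limit.
 */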
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
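
/*
 * A request has been queued for this group: bump the QUEUED counters and
 * update the (debug-only) empty and group-wait bookkeeping.
 */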
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
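
/*
 * Account a dispatched request: sector count plus the SERVICED and
 * SERVICE_BYTES counters for its direction/sync class.
 */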
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
		       sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
		       direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
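
/*
 * On completion, charge service time (dispatch to completion) and wait
 * time (queueing to dispatch) from the timestamps taken at each stage.
 */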
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
		       sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
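
/*
 * Hash a blkio_group into its cgroup's list and record the search key,
 * device number and owning policy id.
 */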
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
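
/*
 * Handler for the "reset_stats" cgroup file: zero every counter but
 * preserve the QUEUED stats and any in-flight idling/waiting/empty state.
 */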
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
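
/*
 * Parse a "major:minor value" rule written to a per-device cgroup file
 * into *newpn; the value is a weight, bps or iops limit depending on
 * plid/fileid.
 */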
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
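
/*
 * Write handler for the per-device rule files. Parses the rule, then
 * inserts, updates or (for a zero value) deletes the matching policy
 * node before propagating the change to affected groups.
 */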
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						       type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_UNACCOUNTED_TIME, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
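
/*
 * Update the cgroup's default weight and push it to every group that
 * does not carry a per-device weight override.
 */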
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
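
/*
 * cgroup removal: unhash every blkio_group, notify the owning policies
 * so they can drop their references, then free the policy rules.
 */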
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
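
/*
 * Allocate a new blkio cgroup with the default weight, or reuse the
 * statically allocated root cgroup for the topmost directory.
 */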
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");