blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
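
/*
 * For example, BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the policy id into the upper 16 bits and the per-file attribute into
 * the lower 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() split the value
 * back apart when a cgroup file callback fires.
 */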
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
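
/*
 * Note: task_subsys_state() is RCU-protected, so callers of
 * task_blkio_cgroup() are expected to hold rcu_read_lock() (or otherwise
 * pin the task's css) for as long as they use the returned blkio_cgroup.
 */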
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
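
/*
 * Each event is thus counted on two axes: a synchronous write, for
 * instance, bumps both BLKIO_STAT_WRITE and BLKIO_STAT_SYNC. The "Total"
 * reported to userspace is derived later as READ + WRITE.
 */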
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if a cfqq got a
	 * new request in the parent group and moved to this group while being
	 * added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
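
/*
 * The matching reader is blkio_read_stat_cpu() below: it loops with
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() until it sees a
 * consistent snapshot, which is what makes the lockless per-cpu update
 * above safe on 32-bit machines.
 */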
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
		       direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct request_queue *q, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
					 struct request_queue *q,
					 enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
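
/*
 * For device 8:16 this yields keys such as "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async" and "8:16 Total", or just "8:16" when
 * diskname_only is set.
 */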
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent parsing too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}

	ret = 0;
out:
	put_disk(disk);
	return ret;
}
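
/*
 * The accepted rule format is "<major>:<minor> <value>", e.g. writing
 * "8:16 1048576" to blkio.throttle.read_bps_device caps reads on 8:16
 * (typically /dev/sdb) at 1MB/s, while a value of 0 removes the rule.
 */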
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

/* Checks whether user asked to delete a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
			 blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
					  struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
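
/*
 * show_total appends a final "Total" row summed across all devices of the
 * cgroup; pcpu selects the lockless per-cpu counters, while the other
 * stats are read under blkg->stats_lock.
 */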
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
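
/*
 * Each entry shows up under the cgroup mount with the subsystem name
 * prefixed, e.g. blkio.weight, blkio.reset_stats and
 * blkio.throttle.read_bps_device.
 */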
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);