blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2 * BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	return blkg->plid == plid;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
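
/*
 * Notify the policy that owns this blkg of a weight change. Policies on
 * the list that do not own the blkg are skipped.
 */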
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}
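
/*
 * Push a throttling bps limit update to the policy owning this blkg;
 * fileid selects the read or write callback.
 */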
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}
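
/*
 * Push a throttling iops limit update to the policy owning this blkg;
 * fileid selects the read or write callback.
 */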
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
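
/* Mark the start of an idle period; ended by blkiocg_update_idle_time_stats(). */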
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
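
/*
 * Sample the current number of queued requests into the average-queue-size
 * sum and fold any pending group wait time into the stats.
 */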
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
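
/* Account one request queued to blkg, split by direction and sync flag. */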
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
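
/*
 * Charge a used timeslice to the group; unaccounted time is tracked only
 * under CONFIG_DEBUG_BLK_CGROUP.
 */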
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
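
/*
 * On request completion, add the service time (dispatch to completion) and
 * the wait time (arrival to dispatch) to the group's stats.
 */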
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
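
/*
 * Link blkg into blkcg under blkcg->lock and record its cgroup path, device
 * and owning policy.
 */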
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
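
/*
 * Handler for the reset_stats cgroup file: zero every group's stats while
 * preserving the queued counters and the idling/waiting/empty state flags.
 */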
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
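
/*
 * Format "major:minor" into str, followed by a Read/Write/Sync/Async/Total
 * suffix unless diskname_only is set.
 */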
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
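
/*
 * Sum one per-cpu counter over all possible CPUs, retrying each per-cpu
 * read via the u64_stats syncp until it is consistent.
 */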
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
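
/*
 * Parse a "major:minor value" rule written to a policy file into *newpn.
 * A value of zero requests rule removal, in which case device presence is
 * not checked.
 */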
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return true;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return true;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return true;
		}
		break;
	default:
		BUG();
	}

	return false;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
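
/*
 * Common write handler for the per-device rule files: parse the rule, then
 * insert, update or delete the matching policy node and propagate the
 * change to the affected groups.
 */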
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
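
/*
 * Set the cgroup's default weight and apply it to every group that has no
 * per-device weight override.
 */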
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
	{ }	/* terminate */
};
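
/*
 * cgroup removal: unlink every blkio_group, let the owning policies drop
 * them, then free the per-device policy rules and the blkcg itself (unless
 * it is the statically allocated root).
 */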
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
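
/*
 * cgroup creation: the root cgroup uses the statically allocated
 * blkio_root_cgroup; children are allocated here with the default weight.
 */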
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.base_cftypes = blkio_files,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");