blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)       (((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)           (((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)             ((val) & 0xffff)
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
        .subsys_id = blkio_subsys_id,
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

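/*
 * Helpers to map a cgroup or a task to the blkio_cgroup that embeds its
 * blkio subsystem state.
 */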
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;
                if (blkiop->ops.blkio_update_group_weight_fn)
                        blkiop->ops.blkio_update_group_weight_fn(blkg->q,
                                                        blkg, weight);
        }
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
                                          int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_bps_device
                    && blkiop->ops.blkio_update_group_read_bps_fn)
                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
                                                        blkg, bps);

                if (fileid == BLKIO_THROTL_write_bps_device
                    && blkiop->ops.blkio_update_group_write_bps_fn)
                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
                                                        blkg, bps);
        }
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
                                           unsigned int iops, int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_iops_device
                    && blkiop->ops.blkio_update_group_read_iops_fn)
                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
                                                        blkg, iops);

                if (fileid == BLKIO_THROTL_write_iops_device
                    && blkiop->ops.blkio_update_group_write_iops_fn)
                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
                                                        blkg, iops);
        }
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                           bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                            struct blkio_group *curr_blkg)
{
        if (blkio_blkg_waiting(&blkg->stats))
                return;
        if (blkg == curr_blkg)
                return;
        blkg->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                stats->group_wait_time += now - stats->start_group_wait_time;
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                stats->empty_time += now - stats->start_empty_time;
        blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        BUG_ON(blkio_blkg_idling(&blkg->stats));
        blkg->stats.start_idle_time = sched_clock();
        blkio_mark_blkg_idling(&blkg->stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        unsigned long long now;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (blkio_blkg_idling(stats)) {
                now = sched_clock();
                if (time_after64(now, stats->start_idle_time))
                        stats->idle_time += now - stats->start_idle_time;
                blkio_clear_blkg_idling(stats);
        }
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        blkio_update_group_wait_time(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;

        if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats)) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                        struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

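/*
 * Account a request being queued to @blkg and update the group's empty/wait
 * bookkeeping.  @curr_blkg is the group the queue is currently servicing;
 * it is used by the debug stats to decide whether @blkg starts waiting.
 */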
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                        sync);
        blkio_end_empty_time(&blkg->stats);
        blkio_set_start_group_wait_time(blkg, curr_blkg);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                    bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
                                   unsigned long unaccounted_time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg->stats.unaccounted_time += unaccounted_time;
#endif
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                   uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        stats_cpu->sectors += bytes >> 9;
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
                        1, direction, sync);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
                        bytes, direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                                io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                    bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
                        direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

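/*
 * blkg_lookup_create - look up the blkio_group for @blkcg + @q + @plid and
 * create it if it doesn't exist yet.  The percpu stats allocation can sleep,
 * so the rcu read lock and @q->queue_lock are dropped and reacquired around
 * it; callers must cope with the group list having changed.  Returns the
 * group or an ERR_PTR() for bypassing/dead queues and allocation failures.
 */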
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q,
                                       enum blkio_policy_id plid,
                                       bool for_root)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkio_policy_type *pol = blkio_policy[plid];
        struct blkio_group *blkg, *new_blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         * The following can be removed if blkg lookup is guaranteed to
         * fail on a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)) && !for_root)
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

        blkg = blkg_lookup(blkcg, q, plid);
        if (blkg)
                return blkg;

        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /*
         * Allocate and initialize.
         *
         * FIXME: The following is broken. Percpu memory allocation
         * requires %GFP_KERNEL context and can't be performed from IO
         * path. Allocation here should inherently be atomic and the
         * following lock dancing can be removed once the broken percpu
         * allocation is fixed.
         */
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
        if (new_blkg) {
                new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

                spin_lock_init(&new_blkg->stats_lock);
                rcu_assign_pointer(new_blkg->q, q);
                new_blkg->blkcg_id = css_id(&blkcg->css);
                new_blkg->plid = plid;
                cgroup_path(blkcg->css.cgroup, new_blkg->path,
                            sizeof(new_blkg->path));
        }

        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        css_put(&blkcg->css);

        /* did bypass get turned on in between? */
        if (unlikely(blk_queue_bypass(q)) && !for_root) {
                blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
                goto out;
        }

        /* did someone beat us to it? */
        blkg = blkg_lookup(blkcg, q, plid);
        if (unlikely(blkg))
                goto out;

        /* did alloc fail? */
        if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
                blkg = ERR_PTR(-ENOMEM);
                goto out;
        }

        /* insert */
        spin_lock(&blkcg->lock);
        swap(blkg, new_blkg);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        pol->ops.blkio_link_group_fn(q, blkg);
        spin_unlock(&blkcg->lock);
out:
        if (new_blkg) {
                free_percpu(new_blkg->stats_cpu);
                kfree(new_blkg);
        }
        return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (css) {
                blkcg = container_of(css, struct blkio_cgroup, css);
                spin_lock_irqsave(&blkcg->lock, flags);
                if (!hlist_unhashed(&blkg->blkcg_node)) {
                        __blkiocg_del_blkio_group(blkg);
                        ret = 0;
                }
                spin_unlock_irqrestore(&blkcg->lock, flags);
        }

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                struct request_queue *q,
                                enum blkio_policy_id plid)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->q == q && blkg->plid == plid)
                        return blkg;
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

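/*
 * Ask every registered policy to drop its groups on @q, retrying until all
 * policies report success.  A policy may transiently fail if it loses a race
 * with cgroup removal (see the comment inside the loop).
 */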
void blkg_destroy_all(struct request_queue *q)
{
        struct blkio_policy_type *pol;

        while (true) {
                bool done = true;

                spin_lock(&blkio_list_lock);
                spin_lock_irq(q->queue_lock);

                /*
                 * clear_queue_fn() might return with non-empty group list
                 * if it raced cgroup removal and lost. cgroup removal is
                 * guaranteed to make forward progress and retrying after a
                 * while is enough. This ugliness is scheduled to be
                 * removed after locking update.
                 */
                list_for_each_entry(pol, &blkio_list, list)
                        if (!pol->ops.blkio_clear_queue_fn(q))
                                done = false;

                spin_unlock_irq(q->queue_lock);
                spin_unlock(&blkio_list_lock);

                if (done)
                        break;

                msleep(10);     /* just some random duration I like */
        }
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
        struct blkio_group_stats_cpu *stats_cpu;
        int i, j, k;

        /*
         * Note: On 64 bit arch this should not be an issue. This has the
         * possibility of returning some inconsistent value on 32bit arch
         * as 64bit update on 32bit is non atomic. Taking care of this
         * corner case makes code very complicated, like sending IPIs to
         * cpus, taking care of stats of offline cpus etc.
         *
         * reset stats is anyway more of a debug feature and this sounds a
         * corner case. So I am not complicating the code yet until and
         * unless this becomes a real issue.
         */
        for_each_possible_cpu(i) {
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
                stats_cpu->sectors = 0;
                for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
                        for (k = 0; k < BLKIO_STAT_TOTAL; k++)
                                stats_cpu->stat_arr_cpu[j][k] = 0;
        }
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct blkio_group_stats *stats;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        bool idling, waiting, empty;
        unsigned long long now = sched_clock();
#endif

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                idling = blkio_blkg_idling(stats);
                waiting = blkio_blkg_waiting(stats);
                empty = blkio_blkg_empty(stats);
#endif
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
                memset(stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
                if (idling) {
                        blkio_mark_blkg_idling(stats);
                        stats->start_idle_time = now;
                }
                if (waiting) {
                        blkio_mark_blkg_waiting(stats);
                        stats->start_group_wait_time = now;
                }
                if (empty) {
                        blkio_mark_blkg_empty(stats);
                        stats->start_empty_time = now;
                }
#endif
                spin_unlock(&blkg->stats_lock);

                /* Reset Per cpu stats which don't take blkg->stats_lock */
                blkio_reset_stats_cpu(blkg);
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
                               char *str, int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%s", dname);
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                        "Possibly incorrect cgroup stat display format");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, const char *dname)
{
        blkio_get_key_name(0, dname, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
                        enum stat_type_cpu type, enum stat_sub_type sub_type)
{
        int cpu;
        struct blkio_group_stats_cpu *stats_cpu;
        u64 val = 0, tval;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

                do {
                        start = u64_stats_fetch_begin(&stats_cpu->syncp);
                        if (type == BLKIO_STAT_CPU_SECTORS)
                                tval = stats_cpu->sectors;
                        else
                                tval = stats_cpu->stat_arr_cpu[type][sub_type];
                } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

                val += tval;
        }

        return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
                                   struct cgroup_map_cb *cb, const char *dname,
                                   enum stat_type_cpu type)
{
        uint64_t disk_total, val;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_CPU_SECTORS) {
                val = blkio_read_stat_cpu(blkg, type, 0);
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
                                       dname);
        }

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                   false);
                val = blkio_read_stat_cpu(blkg, type, sub_type);
                cb->fill(cb, key_str, val);
        }

        disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
                     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                               struct cgroup_map_cb *cb, const char *dname,
                               enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_UNACCOUNTED_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.unaccounted_time, cb, dname);
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       sum, cb, dname);
        }
        if (type == BLKIO_STAT_GROUP_WAIT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.group_wait_time, cb, dname);
        if (type == BLKIO_STAT_IDLE_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.idle_time, cb, dname);
        if (type == BLKIO_STAT_EMPTY_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.empty_time, cb, dname);
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.dequeue, cb, dname);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                   false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

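/*
 * Parse a "major:minor value" rule written to one of the per-device cgroup
 * files and apply it to the matching blkio_group, creating the group if
 * necessary.  @fileid selects which weight/bps/iops knob is being set.
 */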
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
                                      int fileid, struct blkio_cgroup *blkcg)
{
        struct gendisk *disk = NULL;
        struct blkio_group *blkg = NULL;
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        unsigned long major, minor;
        int i = 0, ret = -EINVAL;
        int part;
        dev_t dev;
        u64 temp;

        memset(s, 0, sizeof(s));

        while ((p = strsep(&buf, " ")) != NULL) {
                if (!*p)
                        continue;

                s[i++] = p;

                /* Prevent from inputting too many things */
                if (i == 3)
                        break;
        }

        if (i != 2)
                goto out;

        p = strsep(&s[0], ":");
        if (p != NULL)
                major_s = p;
        else
                goto out;

        minor_s = s[0];
        if (!minor_s)
                goto out;

        if (strict_strtoul(major_s, 10, &major))
                goto out;

        if (strict_strtoul(minor_s, 10, &minor))
                goto out;

        dev = MKDEV(major, minor);

        if (strict_strtoull(s[1], 10, &temp))
                goto out;

        disk = get_gendisk(dev, &part);
        if (!disk || part)
                goto out;

        rcu_read_lock();

        spin_lock_irq(disk->queue->queue_lock);
        blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
        spin_unlock_irq(disk->queue->queue_lock);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }

        switch (plid) {
        case BLKIO_POLICY_PROP:
                if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                     temp > BLKIO_WEIGHT_MAX)
                        goto out_unlock;

                blkg->conf.weight = temp;
                blkio_update_group_weight(blkg, temp ?: blkcg->weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        blkg->conf.bps[READ] = temp;
                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_bps_device:
                        blkg->conf.bps[WRITE] = temp;
                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        blkg->conf.iops[READ] = temp;
                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        blkg->conf.iops[WRITE] = temp;
                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
                        break;
                }
                break;
        default:
                BUG();
        }
        ret = 0;
out_unlock:
        rcu_read_unlock();
out:
        put_disk(disk);

        /*
         * If queue was bypassing, we should retry.  Do so after a short
         * msleep().  It isn't strictly necessary but queue can be
         * bypassing for some time and it's always nice to avoid busy
         * looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                return restart_syscall();
        }
        return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
                              const char *buffer)
{
        int ret = 0;
        char *buf;
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        buf = kstrdup(buffer, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
        kfree(buf);
        return ret;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
                                   struct seq_file *m)
{
        const char *dname = dev_name(blkg->q->backing_dev_info.dev);
        int fileid = BLKIOFILE_ATTR(cft->private);
        int rw = WRITE;

        switch (blkg->plid) {
        case BLKIO_POLICY_PROP:
                if (blkg->conf.weight)
                        seq_printf(m, "%s\t%u\n",
                                   dname, blkg->conf.weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        rw = READ;
                case BLKIO_THROTL_write_bps_device:
                        if (blkg->conf.bps[rw])
                                seq_printf(m, "%s\t%llu\n",
                                           dname, blkg->conf.bps[rw]);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        rw = READ;
                case BLKIO_THROTL_write_iops_device:
                        if (blkg->conf.iops[rw])
                                seq_printf(m, "%s\t%u\n",
                                           dname, blkg->conf.iops[rw]);
                        break;
                }
                break;
        default:
                BUG();
        }
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
                            struct seq_file *m)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
                        blkio_print_group_conf(cft, blkg, m);
        spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *m)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
                struct cftype *cft, struct cgroup_map_cb *cb,
                enum stat_type type, bool show_total, bool pcpu)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t cgroup_total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                const char *dname = dev_name(blkg->q->backing_dev_info.dev);

                if (BLKIOFILE_POLICY(cft->private) != blkg->plid)
                        continue;
                if (pcpu)
                        cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
                                                           type);
                else {
                        spin_lock_irq(&blkg->stats_lock);
                        cgroup_total += blkio_get_stat(blkg, cb, dname, type);
                        spin_unlock_irq(&blkg->stats_lock);
                }
        }
        if (show_total)
                cb->fill(cb, "Total", cgroup_total);
        rcu_read_unlock();
        return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_TIME, 0, 0);
                case BLKIO_PROP_sectors:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SECTORS, 0, 1);
                case BLKIO_PROP_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_PROP_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                case BLKIO_PROP_io_service_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_TIME, 1, 0);
                case BLKIO_PROP_io_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_WAIT_TIME, 1, 0);
                case BLKIO_PROP_io_merged:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_MERGED, 1, 1);
                case BLKIO_PROP_io_queued:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                case BLKIO_PROP_unaccounted_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
                case BLKIO_PROP_dequeue:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_DEQUEUE, 0, 0);
                case BLKIO_PROP_avg_queue_size:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
                case BLKIO_PROP_group_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
                case BLKIO_PROP_idle_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_IDLE_TIME, 0, 0);
                case BLKIO_PROP_empty_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_THROTL_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

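/*
 * Update the cgroup-wide default weight and push it to every group of the
 * matching policy that does not carry a per-device weight override.
 */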
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->plid == plid && !blkg->conf.weight)
                        blkio_update_group_weight(blkg, blkcg->weight);

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return (u64)blkcg->weight;
                }
                break;
        default:
                BUG();
        }
        return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return blkio_weight_write(blkcg, plid, val);
                }
                break;
        default:
                BUG();
        }

        return 0;
}

struct cftype blkio_files[] = {
        {
                .name = "weight_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "weight",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight),
                .read_u64 = blkiocg_file_read_u64,
                .write_u64 = blkiocg_file_write_u64,
        },
        {
                .name = "time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "sectors",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_sectors),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_merged",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_merged),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_queued",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_queued),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_BLK_DEV_THROTTLING
        {
                .name = "throttle.read_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.read_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "throttle.io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_avg_queue_size),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "group_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_group_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "idle_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_idle_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "empty_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_empty_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "dequeue",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_dequeue),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "unaccounted_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_unaccounted_time),
                .read_map = blkiocg_file_read_map,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        struct request_queue *q;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();

        do {
                spin_lock_irqsave(&blkcg->lock, flags);

                if (hlist_empty(&blkcg->blkg_list)) {
                        spin_unlock_irqrestore(&blkcg->lock, flags);
                        break;
                }

                blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                        blkcg_node);
                q = rcu_dereference(blkg->q);
                __blkiocg_del_blkio_group(blkg);

                spin_unlock_irqrestore(&blkcg->lock, flags);

                /*
                 * This blkio_group is being unlinked as associated cgroup is
                 * going away. Let all the IO controlling policies know about
                 * this event.
                 */
                spin_lock(&blkio_list_lock);
                list_for_each_entry(blkiop, &blkio_list, list) {
                        if (blkiop->plid != blkg->plid)
                                continue;
                        blkiop->ops.blkio_unlink_group_fn(q, blkg);
                }
                spin_unlock(&blkio_list_lock);
        } while (1);

        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                           struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;

        cgroup_taskset_for_each(task, cgrp, tset) {
                /* we don't lose anything even if ioc allocation fails */
                ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
                if (ioc) {
                        ioc_cgroup_changed(ioc);
                        put_io_context(ioc);
                }
        }
}

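/*
 * Policy (un)registration.  IO control policies (the proportional-weight
 * BLKIO_POLICY_PROP and the throttling BLKIO_POLICY_THROTL owners) hook in
 * here so the common code can forward configuration updates and group
 * lifetime events to them.
 */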
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid]);
        blkio_policy[blkiop->plid] = blkiop;
        list_add_tail(&blkiop->list, &blkio_list);

        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid] != blkiop);
        blkio_policy[blkiop->plid] = NULL;
        list_del_init(&blkiop->list);

        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);