blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
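
/*
 * Illustrative sketch (not part of the original file): a policy that wants
 * the owning cgroup of a bio would typically resolve it under RCU, e.g.
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkio_cgroup(bio);	<- bio->bi_css if set, else current
 *	... use blkcg ...
 *	rcu_read_unlock();
 *
 * The fallback to task_blkio_cgroup(current) means a bio without an explicit
 * css association is charged to the submitting task's cgroup.
 */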

static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);
		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
					   u64 iops, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);
		if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);
	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}
		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
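
/*
 * Usage sketch (illustrative, not part of the original file): callers hold
 * both the RCU read lock and the queue lock, mirroring what blkg_conf_prep()
 * further below does:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	spin_unlock_irq(q->queue_lock);
 *	if (IS_ERR(blkg))
 *		handle -EBUSY (bypassing queue), -EINVAL or -ENOMEM;
 *	rcu_read_unlock();
 */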

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner. But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to the group, like group stats and group
	 * rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data. If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
static void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
			      u64 (*prfill)(struct seq_file *,
					    struct blkg_policy_data *, int),
			      int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
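
/*
 * Wiring sketch (illustrative, not part of the original file): a stat file
 * is declared by packing the policy id and stat offset into cftype->private
 * and pointing ->read_seq_string at one of the printers below, as the
 * blkio_files[] table later in this file does, e.g.
 *
 *	{
 *		.name = "io_service_time",
 *		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
 *			offsetof(struct blkio_group_stats, service_time)),
 *		.read_seq_string = blkcg_print_rwstat,
 *	},
 */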

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
static u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			     u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
static u64 __blkg_prfill_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd,
				const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
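
/*
 * Example output (illustrative): for a device whose bdi is named "8:16",
 * __blkg_prfill_rwstat() emits one line per direction plus a per-device
 * total, so reading e.g. blkio.io_serviced could look like
 *
 *	8:16 Read 120
 *	8:16 Write 30
 *	8:16 Sync 100
 *	8:16 Async 50
 *	8:16 Total 150
 *
 * with blkcg_print_blkgs() appending an overall "Total <n>" line when
 * show_total is true. The device name and counts are made-up examples.
 */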

static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}

static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

struct blkg_conf_ctx {
	struct gendisk		*disk;
	struct blkio_group	*blkg;
	u64			v;
};

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
static int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
			  struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	char *buf, *s[4], *p, *major_s, *minor_s;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	buf = kstrdup(input, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(s, 0, sizeof(s));
	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		goto out;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = temp;
	ret = 0;
out:
	kfree(buf);
	return ret;
}

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
static void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
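
/*
 * Usage sketch (illustrative, not part of the original file): writers of
 * per-device configuration files pair the two helpers above, as
 * blkcg_set_weight_device() below does:
 *
 *	ret = blkg_conf_prep(blkcg, buf, &ctx);	  <- parses "MAJ:MIN VALUE"
 *	if (ret)
 *		return ret;
 *	... update ctx.blkg using ctx.v under the config's own rules ...
 *	blkg_conf_finish(&ctx);			  <- drops RCU and disk ref
 *
 * From userspace this corresponds to writes such as
 * "echo 8:16 1000 > blkio.weight_device"; the device numbers and value are
 * examples only.
 */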

/* for propio conf */
static u64 blkg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	if (!pd->conf.weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
}

static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
			  false);
	return 0;
}

static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
	return 0;
}

static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				   const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
			      ctx.v <= BLKIO_WEIGHT_MAX))) {
		pd->conf.weight = ctx.v;
		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
					  ctx.v ?: blkcg->weight);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];

		if (pd && !pd->conf.weight)
			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
						  blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

/* for blk-throttle conf */
#ifdef CONFIG_BLK_DEV_THROTTLING
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = *(u64 *)((void *)&pd->conf + off);

	if (!v)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
			  cft->private, false);
	return 0;
}

static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			      const char *buf, int rw,
			      void (*update)(struct blkio_group *, int, u64, int))
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
	if (pd) {
		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
}

static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
}

static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
}

static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
}
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkcg_print_weight_device,
		.write_string = blkcg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = blkcg_print_weight,
		.write_u64 = blkcg_set_weight,
	},
	{
		.name = "time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, sectors)),
		.read_seq_string = blkcg_print_cpu_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_service_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, service_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, wait_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, merged)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, queued)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_r,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = blkcg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, group_wait_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "idle_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, idle_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "empty_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, empty_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "dequeue",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, dequeue)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, unaccounted_time)),
		.read_seq_string = blkcg_print_stat,
	},
#endif
	{ }	/* terminate */
};
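
/*
 * Note (illustrative, not part of the original file): since the subsystem is
 * named "blkio" (see blkio_subsys below), each entry above appears in the
 * cgroup filesystem prefixed with that name, e.g. blkio.weight,
 * blkio.weight_device, blkio.io_service_bytes and, with throttling enabled,
 * blkio.throttle.read_bps_device. Mount point and hierarchy layout depend on
 * system configuration.
 */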

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup. blkgs
 * should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);

	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
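
/*
 * Registration sketch (illustrative, not part of the original file): a
 * policy such as blk-throttle or the proportional-weight scheduler fills in
 * a struct blkio_policy_type and registers it at init time, roughly:
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_init_group_fn	= my_init_group,
 *			... other update callbacks ...
 *		},
 *		.plid		= BLKIO_POLICY_THROTL,	<- example policy id
 *		.pdata_size	= sizeof(struct my_group_data),
 *	};
 *
 *	blkio_policy_register(&my_policy);	<- at module init
 *	...
 *	blkio_policy_unregister(&my_policy);	<- at module exit
 *
 * The names my_policy, my_init_group and struct my_group_data are
 * hypothetical; only the field names come from this file's usage.
 */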

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);

	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);