/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
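
/*
 * Illustrative sketch (not part of the original file): a cftype's
 * ->private field packs the owning policy into the high 16 bits and the
 * per-policy attribute into the low 16 bits. The macro names are the real
 * ones above; the example values are just one possible combination:
 *
 *	int priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device);
 *
 *	BLKIOFILE_POLICY(priv)  == BLKIO_POLICY_THROTL
 *	BLKIOFILE_ATTR(priv)    == BLKIO_THROTL_read_bps_device
 */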
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device &&
		    blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device &&
		    blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device &&
		    blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device &&
		    blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
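
/*
 * Illustrative sketch (not part of the original file): every request is
 * accounted on two axes at once, READ/WRITE and SYNC/ASYNC. Queueing a
 * single synchronous read, for example, behaves like:
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1,
 *		       false, true);
 *
 * after which both stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] and
 * stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_SYNC] have been incremented
 * by one.
 */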
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if cfqq got a
	 * new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * This should be called under the rcu read lock or the queue lock to make
 * sure the blkg pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts provides mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit.
	 * Not optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts provides mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit.
	 * Not optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
		       direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 *
 * FIXME: Should be called with the queue locked but currently isn't due to
 * percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* broken, read the comment in the callsite */
		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
		if (!pd->stats_cpu) {
			blkg_free(blkg);
			return NULL;
		}
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
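
/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * like blkio_policy_parse_and_set() below, holds both the rcu read lock
 * and the queue lock around the call and then checks for an error pointer:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, plid, false);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 *	if (IS_ERR(blkg))
 *		return PTR_ERR(blkg);
 *
 * Note that the function may drop and reacquire the queue lock internally,
 * as annotated by __releases()/__acquires() above.
 */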
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for the root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, the root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
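
/*
 * Illustrative sketch (not part of the original file): the rcu rule above
 * in practice. Under rcu_read_lock() a looked-up blkg stays allocated, so
 * group-local fields may be read, but pointers leading out of the group
 * must not be chased without their own references:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		t = blkg->pd[plid]->stats.time;	// group-local: ok
 *	// following blkg->q->... here would be unsafe without a
 *	// reference on the queue
 *	rcu_read_unlock();
 */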
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On a 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on a 32bit arch
	 * as a 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes the code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * Resetting stats is anyway more of a debug feature and this sounds
	 * like a corner case. So I am not complicating the code yet until
	 * and unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
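
/*
 * Illustrative sketch (not part of the original file): for a device named
 * "8:16" this helper yields keys such as "8:16 Read", "8:16 Write" and
 * "8:16 Total", which is why a map file like blkio.io_service_bytes reads
 * back as lines of the form (values hypothetical):
 *
 *	8:16 Read 1310720
 *	8:16 Write 0
 *	8:16 Total 1310720
 */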
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}

	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent parsing too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If the queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but the queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
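
/*
 * Illustrative sketch (not part of the original file): the parser above
 * accepts a "major:minor value" pair, so a per-device setting from
 * userspace looks like (device numbers hypothetical):
 *
 *	# echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	# echo "8:16 500" > blkio.weight_device
 *
 * A value of 0 clears the per-device override: the "temp ?:" fallbacks
 * above then push either the cgroup-wide weight or an unlimited (-1)
 * throttle setting down to the policy.
 */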
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

/* All map-type cgroup files get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		/* a per-device weight, if set, overrides the cgroup default */
		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks. As the
 * blkcg lock is nested inside the q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
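
/*
 * Illustrative sketch (not part of the original file): a policy registers
 * itself by filling in a blkio_policy_type and calling
 * blkio_policy_register(), typically from its module init. The struct and
 * field names mirror those used in this file; the callback and pdata
 * names below are hypothetical:
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_init_group_fn = example_init_blkio_group,
 *		},
 *		.plid = BLKIO_POLICY_THROTL,
 *		.pdata_size = sizeof(struct example_grp),
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);
 *
 * Registration flips all queues into bypass mode, publishes the policy in
 * blkio_policy[] and on blkio_list, and refreshes root blkg policy data
 * via update_root_blkg_pd().
 */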
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);