blk-cgroup.c

  1. /*
  2. * Common Block IO controller cgroup interface
  3. *
  4. * Based on ideas and code from CFQ, CFS and BFQ:
  5. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6. *
  7. * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8. * Paolo Valente <paolo.valente@unimore.it>
  9. *
  10. * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11. * Nauman Rafique <nauman@google.com>
  12. */
  13. #include <linux/ioprio.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/kdev_t.h>
  16. #include <linux/module.h>
  17. #include <linux/err.h>
  18. #include <linux/blkdev.h>
  19. #include <linux/slab.h>
  20. #include <linux/genhd.h>
  21. #include <linux/delay.h>
  22. #include "blk-cgroup.h"
  23. #include "blk.h"
  24. #define MAX_KEY_LEN 100
  25. static DEFINE_SPINLOCK(blkio_list_lock);
  26. static LIST_HEAD(blkio_list);
  27. static DEFINE_MUTEX(all_q_mutex);
  28. static LIST_HEAD(all_q_list);
  29. struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
  30. EXPORT_SYMBOL_GPL(blkio_root_cgroup);
  31. static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
  32. static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
  33. struct cgroup *);
  34. static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
  35. struct cgroup_taskset *);
  36. static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
  37. struct cgroup_taskset *);
  38. static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
  39. static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
  40. static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
  41. /* for encoding cft->private value on file */
  42. #define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
  43. /* What policy owns the file, proportional or throttle */
  44. #define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
  45. #define BLKIOFILE_ATTR(val) ((val) & 0xffff)
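  /*
   * For example, the throttle read-bps file below is created with
   * BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device):
   * the policy id lands in bits 16-31 of cft->private and the attribute id
   * in bits 0-15, and BLKIOFILE_POLICY() / BLKIOFILE_ATTR() recover the two
   * halves when the file is later read or written.
   */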
  46. struct cgroup_subsys blkio_subsys = {
  47. .name = "blkio",
  48. .create = blkiocg_create,
  49. .can_attach = blkiocg_can_attach,
  50. .attach = blkiocg_attach,
  51. .pre_destroy = blkiocg_pre_destroy,
  52. .destroy = blkiocg_destroy,
  53. .populate = blkiocg_populate,
  54. .subsys_id = blkio_subsys_id,
  55. .module = THIS_MODULE,
  56. };
  57. EXPORT_SYMBOL_GPL(blkio_subsys);
  58. struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
  59. {
  60. return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
  61. struct blkio_cgroup, css);
  62. }
  63. EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
  64. struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
  65. {
  66. return container_of(task_subsys_state(tsk, blkio_subsys_id),
  67. struct blkio_cgroup, css);
  68. }
  69. EXPORT_SYMBOL_GPL(task_blkio_cgroup);
  70. static inline void blkio_update_group_weight(struct blkio_group *blkg,
  71. int plid, unsigned int weight)
  72. {
  73. struct blkio_policy_type *blkiop;
  74. list_for_each_entry(blkiop, &blkio_list, list) {
  75. /* If this policy does not own the blkg, do not send updates */
  76. if (blkiop->plid != plid)
  77. continue;
  78. if (blkiop->ops.blkio_update_group_weight_fn)
  79. blkiop->ops.blkio_update_group_weight_fn(blkg->q,
  80. blkg, weight);
  81. }
  82. }
  83. static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
  84. u64 bps, int fileid)
  85. {
  86. struct blkio_policy_type *blkiop;
  87. list_for_each_entry(blkiop, &blkio_list, list) {
  88. /* If this policy does not own the blkg, do not send updates */
  89. if (blkiop->plid != plid)
  90. continue;
  91. if (fileid == BLKIO_THROTL_read_bps_device
  92. && blkiop->ops.blkio_update_group_read_bps_fn)
  93. blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
  94. blkg, bps);
  95. if (fileid == BLKIO_THROTL_write_bps_device
  96. && blkiop->ops.blkio_update_group_write_bps_fn)
  97. blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
  98. blkg, bps);
  99. }
  100. }
  101. static inline void blkio_update_group_iops(struct blkio_group *blkg,
  102. int plid, unsigned int iops,
  103. int fileid)
  104. {
  105. struct blkio_policy_type *blkiop;
  106. list_for_each_entry(blkiop, &blkio_list, list) {
  107. /* If this policy does not own the blkg, do not send updates */
  108. if (blkiop->plid != plid)
  109. continue;
  110. if (fileid == BLKIO_THROTL_read_iops_device
  111. && blkiop->ops.blkio_update_group_read_iops_fn)
  112. blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
  113. blkg, iops);
  114. if (fileid == BLKIO_THROTL_write_iops_device
  115. && blkiop->ops.blkio_update_group_write_iops_fn)
  116. blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
  117. blkg, iops);
  118. }
  119. }
  120. /*
  121. * Add to the appropriate stat variable depending on the request type.
  122. * This should be called with the blkg->stats_lock held.
  123. */
  124. static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
  125. bool sync)
  126. {
  127. if (direction)
  128. stat[BLKIO_STAT_WRITE] += add;
  129. else
  130. stat[BLKIO_STAT_READ] += add;
  131. if (sync)
  132. stat[BLKIO_STAT_SYNC] += add;
  133. else
  134. stat[BLKIO_STAT_ASYNC] += add;
  135. }
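  /*
   * Each stat_arr row thus carries four buckets: an update is counted once
   * as Read or Write and once as Sync or Async.  Queueing a synchronous
   * write, for example, bumps both BLKIO_STAT_WRITE and BLKIO_STAT_SYNC by
   * @add; the "Total" reported to userspace is computed later as
   * Read + Write.
   */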
  136. /*
  137. * Decrements the appropriate stat variable if non-zero depending on the
  138. * request type. Panics on value being zero.
  139. * This should be called with the blkg->stats_lock held.
  140. */
  141. static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
  142. {
  143. if (direction) {
  144. BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
  145. stat[BLKIO_STAT_WRITE]--;
  146. } else {
  147. BUG_ON(stat[BLKIO_STAT_READ] == 0);
  148. stat[BLKIO_STAT_READ]--;
  149. }
  150. if (sync) {
  151. BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
  152. stat[BLKIO_STAT_SYNC]--;
  153. } else {
  154. BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
  155. stat[BLKIO_STAT_ASYNC]--;
  156. }
  157. }
  158. #ifdef CONFIG_DEBUG_BLK_CGROUP
  159. /* This should be called with the blkg->stats_lock held. */
  160. static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
  161. struct blkio_policy_type *pol,
  162. struct blkio_group *curr_blkg)
  163. {
  164. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  165. if (blkio_blkg_waiting(&pd->stats))
  166. return;
  167. if (blkg == curr_blkg)
  168. return;
  169. pd->stats.start_group_wait_time = sched_clock();
  170. blkio_mark_blkg_waiting(&pd->stats);
  171. }
  172. /* This should be called with the blkg->stats_lock held. */
  173. static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
  174. {
  175. unsigned long long now;
  176. if (!blkio_blkg_waiting(stats))
  177. return;
  178. now = sched_clock();
  179. if (time_after64(now, stats->start_group_wait_time))
  180. stats->group_wait_time += now - stats->start_group_wait_time;
  181. blkio_clear_blkg_waiting(stats);
  182. }
  183. /* This should be called with the blkg->stats_lock held. */
  184. static void blkio_end_empty_time(struct blkio_group_stats *stats)
  185. {
  186. unsigned long long now;
  187. if (!blkio_blkg_empty(stats))
  188. return;
  189. now = sched_clock();
  190. if (time_after64(now, stats->start_empty_time))
  191. stats->empty_time += now - stats->start_empty_time;
  192. blkio_clear_blkg_empty(stats);
  193. }
  194. void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
  195. struct blkio_policy_type *pol)
  196. {
  197. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  198. unsigned long flags;
  199. spin_lock_irqsave(&blkg->stats_lock, flags);
  200. BUG_ON(blkio_blkg_idling(&pd->stats));
  201. pd->stats.start_idle_time = sched_clock();
  202. blkio_mark_blkg_idling(&pd->stats);
  203. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  204. }
  205. EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
  206. void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
  207. struct blkio_policy_type *pol)
  208. {
  209. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  210. unsigned long flags;
  211. unsigned long long now;
  212. struct blkio_group_stats *stats;
  213. spin_lock_irqsave(&blkg->stats_lock, flags);
  214. stats = &pd->stats;
  215. if (blkio_blkg_idling(stats)) {
  216. now = sched_clock();
  217. if (time_after64(now, stats->start_idle_time))
  218. stats->idle_time += now - stats->start_idle_time;
  219. blkio_clear_blkg_idling(stats);
  220. }
  221. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  222. }
  223. EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
  224. void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
  225. struct blkio_policy_type *pol)
  226. {
  227. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  228. unsigned long flags;
  229. struct blkio_group_stats *stats;
  230. spin_lock_irqsave(&blkg->stats_lock, flags);
  231. stats = &pd->stats;
  232. stats->avg_queue_size_sum +=
  233. stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
  234. stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
  235. stats->avg_queue_size_samples++;
  236. blkio_update_group_wait_time(stats);
  237. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  238. }
  239. EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
  240. void blkiocg_set_start_empty_time(struct blkio_group *blkg,
  241. struct blkio_policy_type *pol)
  242. {
  243. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  244. unsigned long flags;
  245. struct blkio_group_stats *stats;
  246. spin_lock_irqsave(&blkg->stats_lock, flags);
  247. stats = &pd->stats;
  248. if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
  249. stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
  250. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  251. return;
  252. }
  253. /*
  254. * group is already marked empty. This can happen if cfqq got new
  255. * request in parent group and moved to this group while being added
  256. * to service tree. Just ignore the event and move on.
  257. */
  258. if (blkio_blkg_empty(stats)) {
  259. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  260. return;
  261. }
  262. stats->start_empty_time = sched_clock();
  263. blkio_mark_blkg_empty(stats);
  264. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  265. }
  266. EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
  267. void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
  268. struct blkio_policy_type *pol,
  269. unsigned long dequeue)
  270. {
  271. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  272. pd->stats.dequeue += dequeue;
  273. }
  274. EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
  275. #else
  276. static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
  277. struct blkio_policy_type *pol,
  278. struct blkio_group *curr_blkg) { }
  279. static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
  280. #endif
  281. void blkiocg_update_io_add_stats(struct blkio_group *blkg,
  282. struct blkio_policy_type *pol,
  283. struct blkio_group *curr_blkg, bool direction,
  284. bool sync)
  285. {
  286. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  287. unsigned long flags;
  288. spin_lock_irqsave(&blkg->stats_lock, flags);
  289. blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
  290. sync);
  291. blkio_end_empty_time(&pd->stats);
  292. blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
  293. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  294. }
  295. EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
  296. void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
  297. struct blkio_policy_type *pol,
  298. bool direction, bool sync)
  299. {
  300. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  301. unsigned long flags;
  302. spin_lock_irqsave(&blkg->stats_lock, flags);
  303. blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
  304. direction, sync);
  305. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  306. }
  307. EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
  308. void blkiocg_update_timeslice_used(struct blkio_group *blkg,
  309. struct blkio_policy_type *pol,
  310. unsigned long time,
  311. unsigned long unaccounted_time)
  312. {
  313. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  314. unsigned long flags;
  315. spin_lock_irqsave(&blkg->stats_lock, flags);
  316. pd->stats.time += time;
  317. #ifdef CONFIG_DEBUG_BLK_CGROUP
  318. pd->stats.unaccounted_time += unaccounted_time;
  319. #endif
  320. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  321. }
  322. EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
  323. /*
  324. * should be called under rcu read lock or queue lock to make sure blkg pointer
  325. * is valid.
  326. */
  327. void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
  328. struct blkio_policy_type *pol,
  329. uint64_t bytes, bool direction, bool sync)
  330. {
  331. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  332. struct blkio_group_stats_cpu *stats_cpu;
  333. unsigned long flags;
  334. /*
  335. * Disabling interrupts to provide mutual exclusion between two
  336. * writes on same cpu. It probably is not needed for 64bit. Not
  337. * optimizing that case yet.
  338. */
  339. local_irq_save(flags);
  340. stats_cpu = this_cpu_ptr(pd->stats_cpu);
  341. u64_stats_update_begin(&stats_cpu->syncp);
  342. stats_cpu->sectors += bytes >> 9;
  343. blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
  344. 1, direction, sync);
  345. blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
  346. bytes, direction, sync);
  347. u64_stats_update_end(&stats_cpu->syncp);
  348. local_irq_restore(flags);
  349. }
  350. EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
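  /*
   * Note that the dispatch path above touches only this CPU's
   * blkio_group_stats_cpu, so no spinlock is taken: local_irq_save() keeps
   * a single writer per CPU and the u64_stats_update_begin/end pair lets
   * 32-bit readers detect torn 64-bit counters (see blkio_read_stat_cpu()).
   */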
  351. void blkiocg_update_completion_stats(struct blkio_group *blkg,
  352. struct blkio_policy_type *pol,
  353. uint64_t start_time,
  354. uint64_t io_start_time, bool direction,
  355. bool sync)
  356. {
  357. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  358. struct blkio_group_stats *stats;
  359. unsigned long flags;
  360. unsigned long long now = sched_clock();
  361. spin_lock_irqsave(&blkg->stats_lock, flags);
  362. stats = &pd->stats;
  363. if (time_after64(now, io_start_time))
  364. blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
  365. now - io_start_time, direction, sync);
  366. if (time_after64(io_start_time, start_time))
  367. blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
  368. io_start_time - start_time, direction, sync);
  369. spin_unlock_irqrestore(&blkg->stats_lock, flags);
  370. }
  371. EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
  372. /* Merged stats are per cpu. */
  373. void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
  374. struct blkio_policy_type *pol,
  375. bool direction, bool sync)
  376. {
  377. struct blkg_policy_data *pd = blkg->pd[pol->plid];
  378. struct blkio_group_stats_cpu *stats_cpu;
  379. unsigned long flags;
  380. /*
  381. * Disabling interrupts to provide mutual exclusion between two
  382. * writes on same cpu. It probably is not needed for 64bit. Not
  383. * optimizing that case yet.
  384. */
  385. local_irq_save(flags);
  386. stats_cpu = this_cpu_ptr(pd->stats_cpu);
  387. u64_stats_update_begin(&stats_cpu->syncp);
  388. blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
  389. direction, sync);
  390. u64_stats_update_end(&stats_cpu->syncp);
  391. local_irq_restore(flags);
  392. }
  393. EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
  394. /**
  395. * blkg_free - free a blkg
  396. * @blkg: blkg to free
  397. *
  398. * Free @blkg which may be partially allocated.
  399. */
  400. static void blkg_free(struct blkio_group *blkg)
  401. {
  402. struct blkg_policy_data *pd;
  403. if (!blkg)
  404. return;
  405. pd = blkg->pd[blkg->plid];
  406. if (pd) {
  407. free_percpu(pd->stats_cpu);
  408. kfree(pd);
  409. }
  410. kfree(blkg);
  411. }
  412. /**
  413. * blkg_alloc - allocate a blkg
  414. * @blkcg: block cgroup the new blkg is associated with
  415. * @q: request_queue the new blkg is associated with
  416. * @pol: policy the new blkg is associated with
  417. *
  418. * Allocate a new blkg associating @blkcg and @q for @pol.
  419. *
  420. * FIXME: Should be called with queue locked but currently isn't due to
  421. * percpu stat breakage.
  422. */
  423. static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
  424. struct request_queue *q,
  425. struct blkio_policy_type *pol)
  426. {
  427. struct blkio_group *blkg;
  428. struct blkg_policy_data *pd;
  429. /* alloc and init base part */
  430. blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
  431. if (!blkg)
  432. return NULL;
  433. spin_lock_init(&blkg->stats_lock);
  434. rcu_assign_pointer(blkg->q, q);
  435. INIT_LIST_HEAD(&blkg->q_node[0]);
  436. INIT_LIST_HEAD(&blkg->q_node[1]);
  437. blkg->blkcg = blkcg;
  438. blkg->plid = pol->plid;
  439. blkg->refcnt = 1;
  440. cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
  441. /* alloc per-policy data and attach it to blkg */
  442. pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
  443. q->node);
  444. if (!pd) {
  445. blkg_free(blkg);
  446. return NULL;
  447. }
  448. blkg->pd[pol->plid] = pd;
  449. pd->blkg = blkg;
  450. /* broken, read comment in the callsite */
  451. pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
  452. if (!pd->stats_cpu) {
  453. blkg_free(blkg);
  454. return NULL;
  455. }
  456. /* invoke per-policy init */
  457. pol->ops.blkio_init_group_fn(blkg);
  458. return blkg;
  459. }
  460. struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
  461. struct request_queue *q,
  462. enum blkio_policy_id plid,
  463. bool for_root)
  464. __releases(q->queue_lock) __acquires(q->queue_lock)
  465. {
  466. struct blkio_policy_type *pol = blkio_policy[plid];
  467. struct blkio_group *blkg, *new_blkg;
  468. WARN_ON_ONCE(!rcu_read_lock_held());
  469. lockdep_assert_held(q->queue_lock);
  470. /*
  471. * This could be the first entry point of blkcg implementation and
  472. * we shouldn't allow anything to go through for a bypassing queue.
  473. * The following can be removed if blkg lookup is guaranteed to
  474. * fail on a bypassing queue.
  475. */
  476. if (unlikely(blk_queue_bypass(q)) && !for_root)
  477. return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
  478. blkg = blkg_lookup(blkcg, q, plid);
  479. if (blkg)
  480. return blkg;
  481. /* blkg holds a reference to blkcg */
  482. if (!css_tryget(&blkcg->css))
  483. return ERR_PTR(-EINVAL);
  484. /*
  485. * Allocate and initialize.
  486. *
  487. * FIXME: The following is broken. Percpu memory allocation
  488. * requires %GFP_KERNEL context and can't be performed from IO
  489. * path. Allocation here should inherently be atomic and the
  490. * following lock dancing can be removed once the broken percpu
  491. * allocation is fixed.
  492. */
  493. spin_unlock_irq(q->queue_lock);
  494. rcu_read_unlock();
  495. new_blkg = blkg_alloc(blkcg, q, pol);
  496. rcu_read_lock();
  497. spin_lock_irq(q->queue_lock);
  498. /* did bypass get turned on in between? */
  499. if (unlikely(blk_queue_bypass(q)) && !for_root) {
  500. blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
  501. goto out;
  502. }
  503. /* did someone beat us to it? */
  504. blkg = blkg_lookup(blkcg, q, plid);
  505. if (unlikely(blkg))
  506. goto out;
  507. /* did alloc fail? */
  508. if (unlikely(!new_blkg)) {
  509. blkg = ERR_PTR(-ENOMEM);
  510. goto out;
  511. }
  512. /* insert */
  513. spin_lock(&blkcg->lock);
  514. swap(blkg, new_blkg);
  515. hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
  516. pol->ops.blkio_link_group_fn(q, blkg);
  517. spin_unlock(&blkcg->lock);
  518. out:
  519. blkg_free(new_blkg);
  520. return blkg;
  521. }
  522. EXPORT_SYMBOL_GPL(blkg_lookup_create);
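  /*
   * A typical caller looks like blkio_policy_parse_and_set() below: take
   * rcu_read_lock() and the queue_lock, call blkg_lookup_create(), and be
   * prepared for it to drop and re-acquire both locks while allocating,
   * returning an ERR_PTR() if the queue went into bypass or the allocation
   * failed in the meantime.
   */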
  523. static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
  524. {
  525. hlist_del_init_rcu(&blkg->blkcg_node);
  526. }
  527. /*
  528. * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
  529. * returns 1, indicating that the blkio_group was unhashed by the time we got to it.
  530. */
  531. int blkiocg_del_blkio_group(struct blkio_group *blkg)
  532. {
  533. struct blkio_cgroup *blkcg = blkg->blkcg;
  534. unsigned long flags;
  535. int ret = 1;
  536. spin_lock_irqsave(&blkcg->lock, flags);
  537. if (!hlist_unhashed(&blkg->blkcg_node)) {
  538. __blkiocg_del_blkio_group(blkg);
  539. ret = 0;
  540. }
  541. spin_unlock_irqrestore(&blkcg->lock, flags);
  542. return ret;
  543. }
  544. EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
  545. /* called under rcu_read_lock(). */
  546. struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
  547. struct request_queue *q,
  548. enum blkio_policy_id plid)
  549. {
  550. struct blkio_group *blkg;
  551. struct hlist_node *n;
  552. hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
  553. if (blkg->q == q && blkg->plid == plid)
  554. return blkg;
  555. return NULL;
  556. }
  557. EXPORT_SYMBOL_GPL(blkg_lookup);
  558. void blkg_destroy_all(struct request_queue *q)
  559. {
  560. struct blkio_policy_type *pol;
  561. while (true) {
  562. bool done = true;
  563. spin_lock(&blkio_list_lock);
  564. spin_lock_irq(q->queue_lock);
  565. /*
  566. * clear_queue_fn() might return with non-empty group list
  567. * if it raced cgroup removal and lost. cgroup removal is
  568. * guaranteed to make forward progress and retrying after a
  569. * while is enough. This ugliness is scheduled to be
  570. * removed after locking update.
  571. */
  572. list_for_each_entry(pol, &blkio_list, list)
  573. if (!pol->ops.blkio_clear_queue_fn(q))
  574. done = false;
  575. spin_unlock_irq(q->queue_lock);
  576. spin_unlock(&blkio_list_lock);
  577. if (done)
  578. break;
  579. msleep(10); /* just some random duration I like */
  580. }
  581. }
  582. static void blkg_rcu_free(struct rcu_head *rcu_head)
  583. {
  584. blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
  585. }
  586. void __blkg_release(struct blkio_group *blkg)
  587. {
  588. /* release the extra blkcg reference this blkg has been holding */
  589. css_put(&blkg->blkcg->css);
  590. /*
  591. * A group is freed in rcu manner. But having an rcu lock does not
  592. * mean that one can access all the fields of blkg and assume these
  593. * are valid. For example, don't try to follow throtl_data and
  594. * request queue links.
  595. *
  596. * Having a reference to the blkg under RCU allows access only to
  597. * values local to the group, like group stats and group rate limits.
  598. */
  599. call_rcu(&blkg->rcu_head, blkg_rcu_free);
  600. }
  601. EXPORT_SYMBOL_GPL(__blkg_release);
  602. static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
  603. {
  604. struct blkg_policy_data *pd = blkg->pd[plid];
  605. struct blkio_group_stats_cpu *stats_cpu;
  606. int i, j, k;
  607. /*
  608. * Note: on a 64-bit arch this should not be an issue. This has the
  609. * possibility of returning an inconsistent value on a 32-bit arch,
  610. * as a 64-bit update on 32-bit is not atomic. Taking care of this
  611. * corner case makes the code very complicated, like sending IPIs to
  612. * cpus, taking care of stats of offline cpus etc.
  613. *
  614. * Resetting stats is anyway more of a debug feature and this sounds
  615. * like a corner case, so I am not complicating the code until and
  616. * unless this becomes a real issue.
  617. */
  618. for_each_possible_cpu(i) {
  619. stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
  620. stats_cpu->sectors = 0;
  621. for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
  622. for (k = 0; k < BLKIO_STAT_TOTAL; k++)
  623. stats_cpu->stat_arr_cpu[j][k] = 0;
  624. }
  625. }
  626. static int
  627. blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
  628. {
  629. struct blkio_cgroup *blkcg;
  630. struct blkio_group *blkg;
  631. struct blkio_group_stats *stats;
  632. struct hlist_node *n;
  633. uint64_t queued[BLKIO_STAT_TOTAL];
  634. int i;
  635. #ifdef CONFIG_DEBUG_BLK_CGROUP
  636. bool idling, waiting, empty;
  637. unsigned long long now = sched_clock();
  638. #endif
  639. blkcg = cgroup_to_blkio_cgroup(cgroup);
  640. spin_lock_irq(&blkcg->lock);
  641. hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
  642. struct blkg_policy_data *pd = blkg->pd[blkg->plid];
  643. spin_lock(&blkg->stats_lock);
  644. stats = &pd->stats;
  645. #ifdef CONFIG_DEBUG_BLK_CGROUP
  646. idling = blkio_blkg_idling(stats);
  647. waiting = blkio_blkg_waiting(stats);
  648. empty = blkio_blkg_empty(stats);
  649. #endif
  650. for (i = 0; i < BLKIO_STAT_TOTAL; i++)
  651. queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
  652. memset(stats, 0, sizeof(struct blkio_group_stats));
  653. for (i = 0; i < BLKIO_STAT_TOTAL; i++)
  654. stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
  655. #ifdef CONFIG_DEBUG_BLK_CGROUP
  656. if (idling) {
  657. blkio_mark_blkg_idling(stats);
  658. stats->start_idle_time = now;
  659. }
  660. if (waiting) {
  661. blkio_mark_blkg_waiting(stats);
  662. stats->start_group_wait_time = now;
  663. }
  664. if (empty) {
  665. blkio_mark_blkg_empty(stats);
  666. stats->start_empty_time = now;
  667. }
  668. #endif
  669. spin_unlock(&blkg->stats_lock);
  670. /* Reset per-cpu stats, which don't take blkg->stats_lock */
  671. blkio_reset_stats_cpu(blkg, blkg->plid);
  672. }
  673. spin_unlock_irq(&blkcg->lock);
  674. return 0;
  675. }
  676. static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
  677. char *str, int chars_left, bool diskname_only)
  678. {
  679. snprintf(str, chars_left, "%s", dname);
  680. chars_left -= strlen(str);
  681. if (chars_left <= 0) {
  682. printk(KERN_WARNING
  683. "Possibly incorrect cgroup stat display format\n");
  684. return;
  685. }
  686. if (diskname_only)
  687. return;
  688. switch (type) {
  689. case BLKIO_STAT_READ:
  690. strlcat(str, " Read", chars_left);
  691. break;
  692. case BLKIO_STAT_WRITE:
  693. strlcat(str, " Write", chars_left);
  694. break;
  695. case BLKIO_STAT_SYNC:
  696. strlcat(str, " Sync", chars_left);
  697. break;
  698. case BLKIO_STAT_ASYNC:
  699. strlcat(str, " Async", chars_left);
  700. break;
  701. case BLKIO_STAT_TOTAL:
  702. strlcat(str, " Total", chars_left);
  703. break;
  704. default:
  705. strlcat(str, " Invalid", chars_left);
  706. }
  707. }
  708. static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
  709. struct cgroup_map_cb *cb, const char *dname)
  710. {
  711. blkio_get_key_name(0, dname, str, chars_left, true);
  712. cb->fill(cb, str, val);
  713. return val;
  714. }
  715. static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
  716. enum stat_type_cpu type, enum stat_sub_type sub_type)
  717. {
  718. struct blkg_policy_data *pd = blkg->pd[plid];
  719. int cpu;
  720. struct blkio_group_stats_cpu *stats_cpu;
  721. u64 val = 0, tval;
  722. for_each_possible_cpu(cpu) {
  723. unsigned int start;
  724. stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);
  725. do {
  726. start = u64_stats_fetch_begin(&stats_cpu->syncp);
  727. if (type == BLKIO_STAT_CPU_SECTORS)
  728. tval = stats_cpu->sectors;
  729. else
  730. tval = stats_cpu->stat_arr_cpu[type][sub_type];
  731. } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
  732. val += tval;
  733. }
  734. return val;
  735. }
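  /*
   * The fetch_begin/fetch_retry loop above re-reads a CPU's counters until
   * no writer raced with the read, which is what makes summing the per-cpu
   * stats safe without blkg->stats_lock even on 32-bit machines.
   */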
  736. static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
  737. struct cgroup_map_cb *cb, const char *dname,
  738. enum stat_type_cpu type)
  739. {
  740. uint64_t disk_total, val;
  741. char key_str[MAX_KEY_LEN];
  742. enum stat_sub_type sub_type;
  743. if (type == BLKIO_STAT_CPU_SECTORS) {
  744. val = blkio_read_stat_cpu(blkg, plid, type, 0);
  745. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
  746. dname);
  747. }
  748. for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
  749. sub_type++) {
  750. blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
  751. false);
  752. val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
  753. cb->fill(cb, key_str, val);
  754. }
  755. disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
  756. blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);
  757. blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
  758. false);
  759. cb->fill(cb, key_str, disk_total);
  760. return disk_total;
  761. }
  762. /* This should be called with blkg->stats_lock held */
  763. static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
  764. struct cgroup_map_cb *cb, const char *dname,
  765. enum stat_type type)
  766. {
  767. struct blkg_policy_data *pd = blkg->pd[plid];
  768. uint64_t disk_total;
  769. char key_str[MAX_KEY_LEN];
  770. enum stat_sub_type sub_type;
  771. if (type == BLKIO_STAT_TIME)
  772. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  773. pd->stats.time, cb, dname);
  774. #ifdef CONFIG_DEBUG_BLK_CGROUP
  775. if (type == BLKIO_STAT_UNACCOUNTED_TIME)
  776. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  777. pd->stats.unaccounted_time, cb, dname);
  778. if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
  779. uint64_t sum = pd->stats.avg_queue_size_sum;
  780. uint64_t samples = pd->stats.avg_queue_size_samples;
  781. if (samples)
  782. do_div(sum, samples);
  783. else
  784. sum = 0;
  785. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  786. sum, cb, dname);
  787. }
  788. if (type == BLKIO_STAT_GROUP_WAIT_TIME)
  789. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  790. pd->stats.group_wait_time, cb, dname);
  791. if (type == BLKIO_STAT_IDLE_TIME)
  792. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  793. pd->stats.idle_time, cb, dname);
  794. if (type == BLKIO_STAT_EMPTY_TIME)
  795. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  796. pd->stats.empty_time, cb, dname);
  797. if (type == BLKIO_STAT_DEQUEUE)
  798. return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
  799. pd->stats.dequeue, cb, dname);
  800. #endif
  801. for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
  802. sub_type++) {
  803. blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
  804. false);
  805. cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
  806. }
  807. disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
  808. pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
  809. blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
  810. false);
  811. cb->fill(cb, key_str, disk_total);
  812. return disk_total;
  813. }
  814. static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
  815. int fileid, struct blkio_cgroup *blkcg)
  816. {
  817. struct gendisk *disk = NULL;
  818. struct blkio_group *blkg = NULL;
  819. struct blkg_policy_data *pd;
  820. char *s[4], *p, *major_s = NULL, *minor_s = NULL;
  821. unsigned long major, minor;
  822. int i = 0, ret = -EINVAL;
  823. int part;
  824. dev_t dev;
  825. u64 temp;
  826. memset(s, 0, sizeof(s));
  827. while ((p = strsep(&buf, " ")) != NULL) {
  828. if (!*p)
  829. continue;
  830. s[i++] = p;
  831. /* Prevent inputting too many things */
  832. if (i == 3)
  833. break;
  834. }
  835. if (i != 2)
  836. goto out;
  837. p = strsep(&s[0], ":");
  838. if (p != NULL)
  839. major_s = p;
  840. else
  841. goto out;
  842. minor_s = s[0];
  843. if (!minor_s)
  844. goto out;
  845. if (strict_strtoul(major_s, 10, &major))
  846. goto out;
  847. if (strict_strtoul(minor_s, 10, &minor))
  848. goto out;
  849. dev = MKDEV(major, minor);
  850. if (strict_strtoull(s[1], 10, &temp))
  851. goto out;
  852. disk = get_gendisk(dev, &part);
  853. if (!disk || part)
  854. goto out;
  855. rcu_read_lock();
  856. spin_lock_irq(disk->queue->queue_lock);
  857. blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
  858. spin_unlock_irq(disk->queue->queue_lock);
  859. if (IS_ERR(blkg)) {
  860. ret = PTR_ERR(blkg);
  861. goto out_unlock;
  862. }
  863. pd = blkg->pd[plid];
  864. switch (plid) {
  865. case BLKIO_POLICY_PROP:
  866. if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
  867. temp > BLKIO_WEIGHT_MAX)
  868. goto out_unlock;
  869. pd->conf.weight = temp;
  870. blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
  871. break;
  872. case BLKIO_POLICY_THROTL:
  873. switch (fileid) {
  874. case BLKIO_THROTL_read_bps_device:
  875. pd->conf.bps[READ] = temp;
  876. blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
  877. break;
  878. case BLKIO_THROTL_write_bps_device:
  879. pd->conf.bps[WRITE] = temp;
  880. blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
  881. break;
  882. case BLKIO_THROTL_read_iops_device:
  883. if (temp > THROTL_IOPS_MAX)
  884. goto out_unlock;
  885. pd->conf.iops[READ] = temp;
  886. blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
  887. break;
  888. case BLKIO_THROTL_write_iops_device:
  889. if (temp > THROTL_IOPS_MAX)
  890. goto out_unlock;
  891. pd->conf.iops[WRITE] = temp;
  892. blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
  893. break;
  894. }
  895. break;
  896. default:
  897. BUG();
  898. }
  899. ret = 0;
  900. out_unlock:
  901. rcu_read_unlock();
  902. out:
  903. put_disk(disk);
  904. /*
  905. * If queue was bypassing, we should retry. Do so after a short
  906. * msleep(). It isn't strictly necessary but queue can be
  907. * bypassing for some time and it's always nice to avoid busy
  908. * looping.
  909. */
  910. if (ret == -EBUSY) {
  911. msleep(10);
  912. return restart_syscall();
  913. }
  914. return ret;
  915. }
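  /*
   * The accepted format is "<major>:<minor> <value>".  Writing
   * "8:16 1048576" to blkio.throttle.read_bps_device, for instance, limits
   * reads on device 8:16 to 1 MiB/s, while a value of 0 clears the setting
   * (the update functions are then called with -1, i.e. "no limit", or with
   * the cgroup-wide default weight for the proportional policy).
   */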
  916. static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
  917. const char *buffer)
  918. {
  919. int ret = 0;
  920. char *buf;
  921. struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
  922. enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
  923. int fileid = BLKIOFILE_ATTR(cft->private);
  924. buf = kstrdup(buffer, GFP_KERNEL);
  925. if (!buf)
  926. return -ENOMEM;
  927. ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
  928. kfree(buf);
  929. return ret;
  930. }
  931. static const char *blkg_dev_name(struct blkio_group *blkg)
  932. {
  933. /* some drivers (floppy) instantiate a queue w/o disk registered */
  934. if (blkg->q->backing_dev_info.dev)
  935. return dev_name(blkg->q->backing_dev_info.dev);
  936. return NULL;
  937. }
  938. static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
  939. struct seq_file *m)
  940. {
  941. int plid = BLKIOFILE_POLICY(cft->private);
  942. int fileid = BLKIOFILE_ATTR(cft->private);
  943. struct blkg_policy_data *pd = blkg->pd[plid];
  944. const char *dname = blkg_dev_name(blkg);
  945. int rw = WRITE;
  946. if (!dname)
  947. return;
  948. switch (plid) {
  949. case BLKIO_POLICY_PROP:
  950. if (pd->conf.weight)
  951. seq_printf(m, "%s\t%u\n",
  952. dname, pd->conf.weight);
  953. break;
  954. case BLKIO_POLICY_THROTL:
  955. switch (fileid) {
  956. case BLKIO_THROTL_read_bps_device:
  957. rw = READ;
  958. case BLKIO_THROTL_write_bps_device:
  959. if (pd->conf.bps[rw])
  960. seq_printf(m, "%s\t%llu\n",
  961. dname, pd->conf.bps[rw]);
  962. break;
  963. case BLKIO_THROTL_read_iops_device:
  964. rw = READ;
  965. case BLKIO_THROTL_write_iops_device:
  966. if (pd->conf.iops[rw])
  967. seq_printf(m, "%s\t%u\n",
  968. dname, pd->conf.iops[rw]);
  969. break;
  970. }
  971. break;
  972. default:
  973. BUG();
  974. }
  975. }
  976. /* cgroup files which read their data from policy nodes end up here */
  977. static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
  978. struct seq_file *m)
  979. {
  980. struct blkio_group *blkg;
  981. struct hlist_node *n;
  982. spin_lock_irq(&blkcg->lock);
  983. hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
  984. if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
  985. blkio_print_group_conf(cft, blkg, m);
  986. spin_unlock_irq(&blkcg->lock);
  987. }
  988. static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
  989. struct seq_file *m)
  990. {
  991. struct blkio_cgroup *blkcg;
  992. enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
  993. int name = BLKIOFILE_ATTR(cft->private);
  994. blkcg = cgroup_to_blkio_cgroup(cgrp);
  995. switch (plid) {
  996. case BLKIO_POLICY_PROP:
  997. switch (name) {
  998. case BLKIO_PROP_weight_device:
  999. blkio_read_conf(cft, blkcg, m);
  1000. return 0;
  1001. default:
  1002. BUG();
  1003. }
  1004. break;
  1005. case BLKIO_POLICY_THROTL:
  1006. switch (name) {
  1007. case BLKIO_THROTL_read_bps_device:
  1008. case BLKIO_THROTL_write_bps_device:
  1009. case BLKIO_THROTL_read_iops_device:
  1010. case BLKIO_THROTL_write_iops_device:
  1011. blkio_read_conf(cft, blkcg, m);
  1012. return 0;
  1013. default:
  1014. BUG();
  1015. }
  1016. break;
  1017. default:
  1018. BUG();
  1019. }
  1020. return 0;
  1021. }
  1022. static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
  1023. struct cftype *cft, struct cgroup_map_cb *cb,
  1024. enum stat_type type, bool show_total, bool pcpu)
  1025. {
  1026. struct blkio_group *blkg;
  1027. struct hlist_node *n;
  1028. uint64_t cgroup_total = 0;
  1029. rcu_read_lock();
  1030. hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
  1031. const char *dname = blkg_dev_name(blkg);
  1032. int plid = BLKIOFILE_POLICY(cft->private);
  1033. if (!dname || plid != blkg->plid)
  1034. continue;
  1035. if (pcpu) {
  1036. cgroup_total += blkio_get_stat_cpu(blkg, plid,
  1037. cb, dname, type);
  1038. } else {
  1039. spin_lock_irq(&blkg->stats_lock);
  1040. cgroup_total += blkio_get_stat(blkg, plid,
  1041. cb, dname, type);
  1042. spin_unlock_irq(&blkg->stats_lock);
  1043. }
  1044. }
  1045. if (show_total)
  1046. cb->fill(cb, "Total", cgroup_total);
  1047. rcu_read_unlock();
  1048. return 0;
  1049. }
  1050. /* All map-type cgroup files get serviced by this function */
  1051. static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
  1052. struct cgroup_map_cb *cb)
  1053. {
  1054. struct blkio_cgroup *blkcg;
  1055. enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
  1056. int name = BLKIOFILE_ATTR(cft->private);
  1057. blkcg = cgroup_to_blkio_cgroup(cgrp);
  1058. switch (plid) {
  1059. case BLKIO_POLICY_PROP:
  1060. switch (name) {
  1061. case BLKIO_PROP_time:
  1062. return blkio_read_blkg_stats(blkcg, cft, cb,
  1063. BLKIO_STAT_TIME, 0, 0);
  1064. case BLKIO_PROP_sectors:
  1065. return blkio_read_blkg_stats(blkcg, cft, cb,
  1066. BLKIO_STAT_CPU_SECTORS, 0, 1);
  1067. case BLKIO_PROP_io_service_bytes:
  1068. return blkio_read_blkg_stats(blkcg, cft, cb,
  1069. BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
  1070. case BLKIO_PROP_io_serviced:
  1071. return blkio_read_blkg_stats(blkcg, cft, cb,
  1072. BLKIO_STAT_CPU_SERVICED, 1, 1);
  1073. case BLKIO_PROP_io_service_time:
  1074. return blkio_read_blkg_stats(blkcg, cft, cb,
  1075. BLKIO_STAT_SERVICE_TIME, 1, 0);
  1076. case BLKIO_PROP_io_wait_time:
  1077. return blkio_read_blkg_stats(blkcg, cft, cb,
  1078. BLKIO_STAT_WAIT_TIME, 1, 0);
  1079. case BLKIO_PROP_io_merged:
  1080. return blkio_read_blkg_stats(blkcg, cft, cb,
  1081. BLKIO_STAT_CPU_MERGED, 1, 1);
  1082. case BLKIO_PROP_io_queued:
  1083. return blkio_read_blkg_stats(blkcg, cft, cb,
  1084. BLKIO_STAT_QUEUED, 1, 0);
  1085. #ifdef CONFIG_DEBUG_BLK_CGROUP
  1086. case BLKIO_PROP_unaccounted_time:
  1087. return blkio_read_blkg_stats(blkcg, cft, cb,
  1088. BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
  1089. case BLKIO_PROP_dequeue:
  1090. return blkio_read_blkg_stats(blkcg, cft, cb,
  1091. BLKIO_STAT_DEQUEUE, 0, 0);
  1092. case BLKIO_PROP_avg_queue_size:
  1093. return blkio_read_blkg_stats(blkcg, cft, cb,
  1094. BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
  1095. case BLKIO_PROP_group_wait_time:
  1096. return blkio_read_blkg_stats(blkcg, cft, cb,
  1097. BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
  1098. case BLKIO_PROP_idle_time:
  1099. return blkio_read_blkg_stats(blkcg, cft, cb,
  1100. BLKIO_STAT_IDLE_TIME, 0, 0);
  1101. case BLKIO_PROP_empty_time:
  1102. return blkio_read_blkg_stats(blkcg, cft, cb,
  1103. BLKIO_STAT_EMPTY_TIME, 0, 0);
  1104. #endif
  1105. default:
  1106. BUG();
  1107. }
  1108. break;
  1109. case BLKIO_POLICY_THROTL:
  1110. switch (name) {
  1111. case BLKIO_THROTL_io_service_bytes:
  1112. return blkio_read_blkg_stats(blkcg, cft, cb,
  1113. BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
  1114. case BLKIO_THROTL_io_serviced:
  1115. return blkio_read_blkg_stats(blkcg, cft, cb,
  1116. BLKIO_STAT_CPU_SERVICED, 1, 1);
  1117. default:
  1118. BUG();
  1119. }
  1120. break;
  1121. default:
  1122. BUG();
  1123. }
  1124. return 0;
  1125. }
  1126. static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
  1127. {
  1128. struct blkio_group *blkg;
  1129. struct hlist_node *n;
  1130. if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
  1131. return -EINVAL;
  1132. spin_lock(&blkio_list_lock);
  1133. spin_lock_irq(&blkcg->lock);
  1134. blkcg->weight = (unsigned int)val;
  1135. hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
  1136. struct blkg_policy_data *pd = blkg->pd[blkg->plid];
  1137. if (blkg->plid == plid && !pd->conf.weight)
  1138. blkio_update_group_weight(blkg, plid, blkcg->weight);
  1139. }
  1140. spin_unlock_irq(&blkcg->lock);
  1141. spin_unlock(&blkio_list_lock);
  1142. return 0;
  1143. }
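  /*
   * Only groups without a per-device weight (pd->conf.weight == 0) follow
   * the cgroup-wide default, which is why the loop above skips any blkg
   * that was configured through blkio.weight_device.
   */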
  1144. static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
  1145. struct blkio_cgroup *blkcg;
  1146. enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
  1147. int name = BLKIOFILE_ATTR(cft->private);
  1148. blkcg = cgroup_to_blkio_cgroup(cgrp);
  1149. switch (plid) {
  1150. case BLKIO_POLICY_PROP:
  1151. switch (name) {
  1152. case BLKIO_PROP_weight:
  1153. return (u64)blkcg->weight;
  1154. }
  1155. break;
  1156. default:
  1157. BUG();
  1158. }
  1159. return 0;
  1160. }
  1161. static int
  1162. blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
  1163. {
  1164. struct blkio_cgroup *blkcg;
  1165. enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
  1166. int name = BLKIOFILE_ATTR(cft->private);
  1167. blkcg = cgroup_to_blkio_cgroup(cgrp);
  1168. switch (plid) {
  1169. case BLKIO_POLICY_PROP:
  1170. switch (name) {
  1171. case BLKIO_PROP_weight:
  1172. return blkio_weight_write(blkcg, plid, val);
  1173. }
  1174. break;
  1175. default:
  1176. BUG();
  1177. }
  1178. return 0;
  1179. }
  1180. struct cftype blkio_files[] = {
  1181. {
  1182. .name = "weight_device",
  1183. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1184. BLKIO_PROP_weight_device),
  1185. .read_seq_string = blkiocg_file_read,
  1186. .write_string = blkiocg_file_write,
  1187. .max_write_len = 256,
  1188. },
  1189. {
  1190. .name = "weight",
  1191. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1192. BLKIO_PROP_weight),
  1193. .read_u64 = blkiocg_file_read_u64,
  1194. .write_u64 = blkiocg_file_write_u64,
  1195. },
  1196. {
  1197. .name = "time",
  1198. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1199. BLKIO_PROP_time),
  1200. .read_map = blkiocg_file_read_map,
  1201. },
  1202. {
  1203. .name = "sectors",
  1204. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1205. BLKIO_PROP_sectors),
  1206. .read_map = blkiocg_file_read_map,
  1207. },
  1208. {
  1209. .name = "io_service_bytes",
  1210. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1211. BLKIO_PROP_io_service_bytes),
  1212. .read_map = blkiocg_file_read_map,
  1213. },
  1214. {
  1215. .name = "io_serviced",
  1216. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1217. BLKIO_PROP_io_serviced),
  1218. .read_map = blkiocg_file_read_map,
  1219. },
  1220. {
  1221. .name = "io_service_time",
  1222. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1223. BLKIO_PROP_io_service_time),
  1224. .read_map = blkiocg_file_read_map,
  1225. },
  1226. {
  1227. .name = "io_wait_time",
  1228. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1229. BLKIO_PROP_io_wait_time),
  1230. .read_map = blkiocg_file_read_map,
  1231. },
  1232. {
  1233. .name = "io_merged",
  1234. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1235. BLKIO_PROP_io_merged),
  1236. .read_map = blkiocg_file_read_map,
  1237. },
  1238. {
  1239. .name = "io_queued",
  1240. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1241. BLKIO_PROP_io_queued),
  1242. .read_map = blkiocg_file_read_map,
  1243. },
  1244. {
  1245. .name = "reset_stats",
  1246. .write_u64 = blkiocg_reset_stats,
  1247. },
  1248. #ifdef CONFIG_BLK_DEV_THROTTLING
  1249. {
  1250. .name = "throttle.read_bps_device",
  1251. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1252. BLKIO_THROTL_read_bps_device),
  1253. .read_seq_string = blkiocg_file_read,
  1254. .write_string = blkiocg_file_write,
  1255. .max_write_len = 256,
  1256. },
  1257. {
  1258. .name = "throttle.write_bps_device",
  1259. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1260. BLKIO_THROTL_write_bps_device),
  1261. .read_seq_string = blkiocg_file_read,
  1262. .write_string = blkiocg_file_write,
  1263. .max_write_len = 256,
  1264. },
  1265. {
  1266. .name = "throttle.read_iops_device",
  1267. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1268. BLKIO_THROTL_read_iops_device),
  1269. .read_seq_string = blkiocg_file_read,
  1270. .write_string = blkiocg_file_write,
  1271. .max_write_len = 256,
  1272. },
  1273. {
  1274. .name = "throttle.write_iops_device",
  1275. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1276. BLKIO_THROTL_write_iops_device),
  1277. .read_seq_string = blkiocg_file_read,
  1278. .write_string = blkiocg_file_write,
  1279. .max_write_len = 256,
  1280. },
  1281. {
  1282. .name = "throttle.io_service_bytes",
  1283. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1284. BLKIO_THROTL_io_service_bytes),
  1285. .read_map = blkiocg_file_read_map,
  1286. },
  1287. {
  1288. .name = "throttle.io_serviced",
  1289. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
  1290. BLKIO_THROTL_io_serviced),
  1291. .read_map = blkiocg_file_read_map,
  1292. },
  1293. #endif /* CONFIG_BLK_DEV_THROTTLING */
  1294. #ifdef CONFIG_DEBUG_BLK_CGROUP
  1295. {
  1296. .name = "avg_queue_size",
  1297. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1298. BLKIO_PROP_avg_queue_size),
  1299. .read_map = blkiocg_file_read_map,
  1300. },
  1301. {
  1302. .name = "group_wait_time",
  1303. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1304. BLKIO_PROP_group_wait_time),
  1305. .read_map = blkiocg_file_read_map,
  1306. },
  1307. {
  1308. .name = "idle_time",
  1309. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1310. BLKIO_PROP_idle_time),
  1311. .read_map = blkiocg_file_read_map,
  1312. },
  1313. {
  1314. .name = "empty_time",
  1315. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1316. BLKIO_PROP_empty_time),
  1317. .read_map = blkiocg_file_read_map,
  1318. },
  1319. {
  1320. .name = "dequeue",
  1321. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1322. BLKIO_PROP_dequeue),
  1323. .read_map = blkiocg_file_read_map,
  1324. },
  1325. {
  1326. .name = "unaccounted_time",
  1327. .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
  1328. BLKIO_PROP_unaccounted_time),
  1329. .read_map = blkiocg_file_read_map,
  1330. },
  1331. #endif
  1332. };
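  /*
   * Each entry above shows up as a "blkio."-prefixed file in every blkio
   * cgroup directory once blkiocg_populate() adds it, e.g. (on a typical
   * setup) /sys/fs/cgroup/blkio/<group>/blkio.weight or
   * blkio.throttle.read_bps_device, with reads and writes dispatched
   * through the handlers wired up here.
   */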
  1333. static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  1334. {
  1335. return cgroup_add_files(cgroup, subsys, blkio_files,
  1336. ARRAY_SIZE(blkio_files));
  1337. }
  1338. static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
  1339. struct cgroup *cgroup)
  1340. {
  1341. struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
  1342. unsigned long flags;
  1343. struct blkio_group *blkg;
  1344. struct request_queue *q;
  1345. struct blkio_policy_type *blkiop;
  1346. rcu_read_lock();
  1347. do {
  1348. spin_lock_irqsave(&blkcg->lock, flags);
  1349. if (hlist_empty(&blkcg->blkg_list)) {
  1350. spin_unlock_irqrestore(&blkcg->lock, flags);
  1351. break;
  1352. }
  1353. blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
  1354. blkcg_node);
  1355. q = rcu_dereference(blkg->q);
  1356. __blkiocg_del_blkio_group(blkg);
  1357. spin_unlock_irqrestore(&blkcg->lock, flags);
  1358. /*
  1359. * This blkio_group is being unlinked as the associated cgroup is
  1360. * going away. Let all the IO controlling policies know about
  1361. * this event.
  1362. */
  1363. spin_lock(&blkio_list_lock);
  1364. list_for_each_entry(blkiop, &blkio_list, list) {
  1365. if (blkiop->plid != blkg->plid)
  1366. continue;
  1367. blkiop->ops.blkio_unlink_group_fn(q, blkg);
  1368. }
  1369. spin_unlock(&blkio_list_lock);
  1370. } while (1);
  1371. rcu_read_unlock();
  1372. return 0;
  1373. }
  1374. static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  1375. {
  1376. struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
  1377. if (blkcg != &blkio_root_cgroup)
  1378. kfree(blkcg);
  1379. }
  1380. static struct cgroup_subsys_state *
  1381. blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  1382. {
  1383. struct blkio_cgroup *blkcg;
  1384. struct cgroup *parent = cgroup->parent;
  1385. if (!parent) {
  1386. blkcg = &blkio_root_cgroup;
  1387. goto done;
  1388. }
  1389. blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
  1390. if (!blkcg)
  1391. return ERR_PTR(-ENOMEM);
  1392. blkcg->weight = BLKIO_WEIGHT_DEFAULT;
  1393. done:
  1394. spin_lock_init(&blkcg->lock);
  1395. INIT_HLIST_HEAD(&blkcg->blkg_list);
  1396. return &blkcg->css;
  1397. }
  1398. /**
  1399. * blkcg_init_queue - initialize blkcg part of request queue
  1400. * @q: request_queue to initialize
  1401. *
  1402. * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
  1403. * part of new request_queue @q.
  1404. *
  1405. * RETURNS:
  1406. * 0 on success, -errno on failure.
  1407. */
  1408. int blkcg_init_queue(struct request_queue *q)
  1409. {
  1410. int ret;
  1411. might_sleep();
  1412. ret = blk_throtl_init(q);
  1413. if (ret)
  1414. return ret;
  1415. mutex_lock(&all_q_mutex);
  1416. INIT_LIST_HEAD(&q->all_q_node);
  1417. list_add_tail(&q->all_q_node, &all_q_list);
  1418. mutex_unlock(&all_q_mutex);
  1419. return 0;
  1420. }
  1421. /**
  1422. * blkcg_drain_queue - drain blkcg part of request_queue
  1423. * @q: request_queue to drain
  1424. *
  1425. * Called from blk_drain_queue(). Responsible for draining blkcg part.
  1426. */
  1427. void blkcg_drain_queue(struct request_queue *q)
  1428. {
  1429. lockdep_assert_held(q->queue_lock);
  1430. blk_throtl_drain(q);
  1431. }
  1432. /**
  1433. * blkcg_exit_queue - exit and release blkcg part of request_queue
  1434. * @q: request_queue being released
  1435. *
  1436. * Called from blk_release_queue(). Responsible for exiting blkcg part.
  1437. */
  1438. void blkcg_exit_queue(struct request_queue *q)
  1439. {
  1440. mutex_lock(&all_q_mutex);
  1441. list_del_init(&q->all_q_node);
  1442. mutex_unlock(&all_q_mutex);
  1443. blk_throtl_exit(q);
  1444. }
  1445. /*
  1446. * We cannot support shared io contexts, as we have no means to support
  1447. * two tasks with the same ioc in two different groups without major rework
  1448. * of the main cic data structures. For now we allow a task to change
  1449. * its cgroup only if it's the only owner of its ioc.
  1450. */
  1451. static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
  1452. struct cgroup_taskset *tset)
  1453. {
  1454. struct task_struct *task;
  1455. struct io_context *ioc;
  1456. int ret = 0;
  1457. /* task_lock() is needed to avoid races with exit_io_context() */
  1458. cgroup_taskset_for_each(task, cgrp, tset) {
  1459. task_lock(task);
  1460. ioc = task->io_context;
  1461. if (ioc && atomic_read(&ioc->nr_tasks) > 1)
  1462. ret = -EINVAL;
  1463. task_unlock(task);
  1464. if (ret)
  1465. break;
  1466. }
  1467. return ret;
  1468. }
  1469. static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
  1470. struct cgroup_taskset *tset)
  1471. {
  1472. struct task_struct *task;
  1473. struct io_context *ioc;
  1474. cgroup_taskset_for_each(task, cgrp, tset) {
  1475. /* we don't lose anything even if ioc allocation fails */
  1476. ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
  1477. if (ioc) {
  1478. ioc_cgroup_changed(ioc);
  1479. put_io_context(ioc);
  1480. }
  1481. }
  1482. }
  1483. static void blkcg_bypass_start(void)
  1484. __acquires(&all_q_mutex)
  1485. {
  1486. struct request_queue *q;
  1487. mutex_lock(&all_q_mutex);
  1488. list_for_each_entry(q, &all_q_list, all_q_node) {
  1489. blk_queue_bypass_start(q);
  1490. blkg_destroy_all(q);
  1491. }
  1492. }
  1493. static void blkcg_bypass_end(void)
  1494. __releases(&all_q_mutex)
  1495. {
  1496. struct request_queue *q;
  1497. list_for_each_entry(q, &all_q_list, all_q_node)
  1498. blk_queue_bypass_end(q);
  1499. mutex_unlock(&all_q_mutex);
  1500. }
  1501. void blkio_policy_register(struct blkio_policy_type *blkiop)
  1502. {
  1503. blkcg_bypass_start();
  1504. spin_lock(&blkio_list_lock);
  1505. BUG_ON(blkio_policy[blkiop->plid]);
  1506. blkio_policy[blkiop->plid] = blkiop;
  1507. list_add_tail(&blkiop->list, &blkio_list);
  1508. spin_unlock(&blkio_list_lock);
  1509. blkcg_bypass_end();
  1510. }
  1511. EXPORT_SYMBOL_GPL(blkio_policy_register);
  1512. void blkio_policy_unregister(struct blkio_policy_type *blkiop)
  1513. {
  1514. blkcg_bypass_start();
  1515. spin_lock(&blkio_list_lock);
  1516. BUG_ON(blkio_policy[blkiop->plid] != blkiop);
  1517. blkio_policy[blkiop->plid] = NULL;
  1518. list_del_init(&blkiop->list);
  1519. spin_unlock(&blkio_list_lock);
  1520. blkcg_bypass_end();
  1521. }
  1522. EXPORT_SYMBOL_GPL(blkio_policy_unregister);