blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 *
 * FIXME: Should be called with queue locked but currently isn't due to
 *        percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* broken, read comment in the callsite */
		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
		if (!pd->stats_cpu) {
			blkg_free(blkg);
			return NULL;
		}
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken.  Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path.  Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);

	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	q->nr_blkgs++;

	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	WARN_ON_ONCE(q->nr_blkgs <= 0);
	q->nr_blkgs--;

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset Per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}
	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	rcu_read_lock();
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = rcu_dereference(blkg->q);

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			rcu_read_unlock();
			cpu_relax();
			rcu_read_lock();
			spin_lock(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	rcu_read_unlock();
	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);