blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
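/*
 * For example, BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device) packs the owning policy id into the high
 * 16 bits and the per-file attribute into the low 16 bits;
 * BLKIOFILE_POLICY() and BLKIOFILE_ATTR() undo the packing.
 */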
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
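/*
 * Return the blkio_cgroup a bio is associated with; when the bio carries
 * no css (bi_css), fall back to the cgroup of the issuing task.
 */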
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
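/*
 * The three helpers below fan a configuration change (weight, bps limit,
 * iops limit) out to the registered policy that owns @blkg; policies
 * with a different plid are skipped.
 */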
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
/*
 * Decrements the appropriate stat counter for the request type; BUGs if
 * the counter is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}
	/*
	 * The group is already marked empty. This can happen if a cfqq got
	 * a new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move
	 * on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif
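/*
 * Account one request being queued to @blkg under @pol and update the
 * group-wait / empty-time debug bookkeeping.
 */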
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
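/*
 * service_time covers dispatch to completion (io_start_time -> now);
 * wait_time covers arrival to dispatch (start_time -> io_start_time).
 */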
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are protected by blkg->stats_lock like the other group stats. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
						msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
						alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}
		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
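/**
 * blkg_lookup_create - look up a blkg, creating one if necessary
 * @blkcg: block cgroup of interest
 * @q: request_queue of interest
 * @plid: policy id of the caller
 * @for_root: allow the lookup to proceed on a bypassing queue (root only)
 *
 * Returns the existing blkg linking @blkcg and @q, or allocates and links
 * a new one.  Per cpu stats are allocated asynchronously by a worker, so
 * they may not be available immediately.  Returns ERR_PTR() on failure.
 */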
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
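/*
 * Unlink @blkg from its request_queue, its blkcg and the pending stat
 * allocation list, then drop the reference taken at creation time.
 * Both the queue lock and the blkcg lock must be held.
 */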
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}
void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	if (pd->stats_cpu == NULL)
		return;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * Resetting stats is more of a debug feature anyway and this
	 * sounds like a corner case, so the code is not being complicated
	 * until and unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
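/*
 * blkio.reset_stats write handler. Zeroes every stat of every group in
 * the cgroup while preserving the queued counts (those requests are
 * still in flight) and re-arming any in-progress idling/waiting/empty
 * debug timers against the current clock.
 */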
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
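/*
 * Build a stat key of the form "<device> <Read|Write|Sync|Async|Total>",
 * or just "<device>" when @diskname_only is set.
 */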
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
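/*
 * Sum one per cpu stat across all possible CPUs. The u64_stats seqcount
 * retry loop guards against torn 64bit reads on 32bit architectures.
 */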
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}

	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
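/*
 * Parse a "MAJ:MIN VAL" configuration string, look up (creating if
 * necessary) the blkg for that device and apply the new weight or
 * throttling limit. A VAL of 0 clears the per-device setting.
 */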
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If the queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but the queue can
	 * be bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
/* All map-type cgroup files are serviced by this function. */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
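/*
 * Update the cgroup-wide default weight and propagate it to every group
 * that has no per-device weight override.
 */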
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			/* re-disable irqs to pair with the final spin_unlock_irq() */
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
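/*
 * blkcg_bypass_start/end() bracket policy (un)registration: every queue
 * is put into bypass mode and its non-root blkgs are destroyed, so the
 * policy list can be changed safely.
 */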
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);