blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
							blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
							blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
							blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
							blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
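
/*
 * Debug-only statistics: the group wait time, idle time and empty time
 * accounting below is compiled in only with CONFIG_DEBUG_BLK_CGROUP.
 */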
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
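
/*
 * Update stats when a request is queued to @blkg on behalf of @curr_blkg:
 * bump the queued counter and close out any "empty" period in progress.
 */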
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
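
/*
 * Link @blkg into @blkcg's group list under the opaque @key and record the
 * device number and the policy that owns the group.
 */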
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
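
/*
 * Reset the statistics of every group in the cgroup. The queued counters
 * and (in debug builds) the idling/waiting/empty state are preserved across
 * the reset so that in-flight accounting stays consistent.
 */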
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
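
/*
 * Format a stat key as "major:minor" followed, unless @diskname_only is
 * set, by the sub-type suffix (" Read", " Write", " Sync", " Async" or
 * " Total").
 */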
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
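
/* Verify that @dev names a whole registered disk, not a partition. */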
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
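
/*
 * Parse a rule of the form "major:minor value" into @newpn. For the
 * proportional policy the value is a weight; for the throttling policy it
 * is a bytes-per-second or IOs-per-second limit, depending on @fileid.
 */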
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
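
/*
 * Handle a write to a per-device rule file. A value of zero deletes an
 * existing rule; otherwise the rule is inserted into or updated in the
 * cgroup's policy list, and the change is propagated to matching groups.
 */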
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
		struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
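
/*
 * Update the cgroup-wide default weight and push it to every group that
 * does not have a per-device weight rule overriding it.
 */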
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
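
/*
 * Unlink every blkio_group from the dying cgroup, notify the owning policy
 * of each unlink, then free the cgroup's policy rules and, unless it is the
 * root cgroup, the blkio_cgroup itself.
 */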
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0, 1) */
	if (parent != cgroup->top_cgroup)
		return ERR_PTR(-EPERM);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");