blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
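
/*
 * Illustrative sketch (not part of the original file): cft->private packs
 * the owning policy into the upper 16 bits and the per-file attribute into
 * the lower 16 bits, so the two decode macros simply invert the encoding:
 *
 *	int priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device);
 *
 *	BLKIOFILE_POLICY(priv);	-> BLKIO_POLICY_THROTL
 *	BLKIOFILE_ATTR(priv);	-> BLKIO_THROTL_read_bps_device
 */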

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
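
/*
 * Illustrative sketch (not part of the original file): task_subsys_state()
 * is an RCU-protected lookup, so callers are expected to pin the result,
 * along the lines of:
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	... use blkcg ...
 *	rcu_read_unlock();
 */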

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
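
/*
 * Illustrative note (not part of the original file): each event is counted
 * twice, once by direction and once by synchronicity. A single synchronous
 * write, for instance, bumps two buckets of the same array:
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
 *	(stat[BLKIO_STAT_WRITE] += 1 and stat[BLKIO_STAT_SYNC] += 1)
 */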

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
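
/*
 * Illustrative note (not part of the original file): the shift above is the
 * usual bytes-to-512-byte-sectors conversion, i.e.
 *
 *	bytes >> 9  ==  bytes / 512
 *
 * so a 4096-byte request dispatched here adds 8 to the per-cpu sector count.
 */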

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
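
/*
 * Illustrative sketch (not part of the original file): a policy allocating
 * a group would call this from process context and bail out on failure,
 * roughly:
 *
 *	blkg = kzalloc_node(sizeof(*blkg), GFP_KERNEL, node);
 *	if (!blkg)
 *		return NULL;
 *	if (blkio_alloc_blkg_stats(blkg)) {
 *		kfree(blkg);
 *		return NULL;
 *	}
 */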

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
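
/*
 * Illustrative sketch (not part of the original file): the "key" is an
 * opaque per-queue cookie chosen by the policy when it called
 * blkiocg_add_blkio_group(), so a typical lookup is just:
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	rcu_read_unlock();
 */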

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
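
/*
 * Illustrative note (not part of the original file): the generated keys are
 * what user space sees in the stat files, e.g. for dev_t MKDEV(8, 16) and
 * type BLKIO_STAT_READ the buffer ends up holding "8:16 Read", and with
 * diskname_only just "8:16".
 */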

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	uint64_t val = 0;

	for_each_possible_cpu(cpu) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		if (type == BLKIO_STAT_CPU_SECTORS)
			val += stats_cpu->sectors;
		else
			val += stats_cpu->stat_arr_cpu[type][sub_type];
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
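
/*
 * Illustrative note (not part of the original file): the parser accepts a
 * "major:minor value" pair, so rules are written from user space along the
 * lines of (device numbers hypothetical):
 *
 *	# 1 MB/s read limit on device 8:16
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 *	# weight 500 for device 8:16 (proportional policy)
 *	echo "8:16 500" > blkio.weight_device
 */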

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
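
/*
 * Illustrative note (not part of the original file): the cgroup-wide weight
 * only applies to groups without a per-device override, so from user space
 * (cgroup mount point hypothetical):
 *
 *	echo 500 > /cgroup/blkio/mygroup/blkio.weight
 *
 * updates every blkg of the cgroup except those pinned by an earlier
 * blkio.weight_device rule.
 */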

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
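
/*
 * Illustrative note (not part of the original file): each entry above shows
 * up in the cgroup filesystem prefixed with the subsystem name, e.g.
 * "blkio.weight", "blkio.reset_stats" or "blkio.throttle.read_bps_device".
 */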

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
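
/*
 * Illustrative sketch (not part of the original file, callback names
 * hypothetical): an IO controlling policy registers itself with a
 * filled-in blkio_policy_type, roughly:
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn = my_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = my_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&my_policy);
 */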

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");