/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *);
static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
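
/*
 * Illustrative example (not part of the original source): the macros above
 * pack a policy id and a per-file attribute into the 32-bit cft->private
 * field, e.g.
 *
 *	BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 *
 * stores BLKIO_POLICY_THROTL in the upper 16 bits and the file id in the
 * lower 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() recover the two
 * halves when a file handler runs.
 */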
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}
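
/*
 * Illustrative call chain (derived from this file, not a comment in the
 * original): writing "8:16 100" to blkio.throttle.read_iops_device ends up
 * in blkiocg_file_write() -> blkio_update_policy_node_blkg() ->
 * blkio_update_blkg_policy(), which calls blkio_update_group_iops() above;
 * that in turn hands the new limit to the owning policy (blk-throttle)
 * through its blkio_update_group_read_iops_fn callback.
 */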
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
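
/*
 * Worked example (illustrative, not part of the original source): for a
 * synchronous write of one request, blkio_add_stat(stat, 1, true, true)
 * bumps both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC]. Every
 * request lands in exactly one of Read/Write and one of Sync/Async, so
 * the two pairs each sum to the same total.
 */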
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
			direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
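
/*
 * Illustrative note (an assumption based on how callers use this API, not a
 * comment from the original file): "key" is an opaque cookie chosen by the
 * IO policy when it called blkiocg_add_blkio_group(), typically a pointer
 * to the policy's per-queue data, so a policy can find its own blkg inside
 * a cgroup with something like:
 *
 *	blkg = blkiocg_lookup_group(blkcg, my_per_queue_data);
 */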
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
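
/*
 * Usage sketch (illustrative; assumes the blkio controller is mounted at
 * /sys/fs/cgroup/blkio): this handler is wired to the "reset_stats" file
 * below via .write_u64, and writing any value triggers a reset:
 *
 *	echo 1 > /sys/fs/cgroup/blkio/mygroup/blkio.reset_stats
 *
 * Queued counts are carried over on purpose: requests still sitting in the
 * queue must be decremented later by blkiocg_update_io_remove_stats().
 */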
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
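
/*
 * Example output keys (derived from the code above, not a comment in the
 * original): for dev_t 8:16 this produces "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async" or "8:16 Total" depending on 'type', and just
 * "8:16" when diskname_only is set (as blkio_fill_stat() does for
 * single-valued stats).
 */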
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}
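
/*
 * Input format examples (illustrative, not from the original source): the
 * parser above expects "<major>:<minor> <value>". Assuming 8:16 is sdb and
 * the controller is mounted at /sys/fs/cgroup/blkio:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device   // 1 MB/s cap
 *	echo "8:16 0"       > blkio.throttle.read_bps_device   // remove rule
 *
 * A value of 0 skips the get_gendisk() presence check because, per the
 * comment above, rule removal must work even for absent devices.
 */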
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
		struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
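
/*
 * Usage sketch (illustrative): this is the handler behind the "weight"
 * file's .write_u64 below, so
 *
 *	echo 500 > /sys/fs/cgroup/blkio/mygroup/blkio.weight
 *
 * sets the group's default proportional weight (the value must lie within
 * [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX]). Devices with an explicit
 * blkio.weight_device rule are skipped by the loop above, so per-device
 * weights keep overriding the group default.
 */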
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
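
/*
 * Note (an assumption about cgroup core behaviour, not from this file):
 * cgroup prefixes each cftype name with the subsystem name, so the entries
 * above show up in a cgroup directory as "blkio.weight",
 * "blkio.throttle.read_bps_device", "blkio.reset_stats" and so on.
 */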
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
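
/*
 * Illustrative sketch (not part of this file): an IO policy such as CFQ or
 * blk-throttle registers itself roughly like this; the exact ops it fills
 * in and the example_* names are hypothetical:
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_group,
 *			.blkio_update_group_weight_fn = example_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);	// at init
 *	blkio_policy_unregister(&blkio_policy_example);	// at exit
 *
 * Once registered, the update helpers near the top of this file fan out
 * configuration changes to every policy whose plid matches the blkg.
 */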
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");