blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
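
/*
 * Illustrative example: for the "throttle.read_bps_device" file defined
 * below, cft->private is BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device). The policy id lands in the upper 16 bits
 * and the file id in the lower 16, so BLKIOFILE_POLICY() and
 * BLKIOFILE_ATTR() recover the two halves inside the shared read/write
 * callbacks.
 */
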
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach_task = blkiocg_can_attach_task,
	.attach_task = blkiocg_attach_task,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								   blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								    blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
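
/*
 * Illustrative example: a synchronous write (direction == true, sync ==
 * true) bumps both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC], so
 * each stat array tallies every request twice: once by direction and once
 * by sync/async.
 */
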
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						   struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
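
/*
 * Sketch of the matching reader side: blkio_read_stat_cpu() below retries
 * with u64_stats_fetch_begin()/u64_stats_fetch_retry() until it observes
 * a consistent snapshot, which is why the writers above only need to
 * disable local interrupts rather than take a shared lock.
 */
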
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     uint64_t start_time, uint64_t io_start_time,
				     bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
		       direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
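
/*
 * Illustrative example: for dev_t 8:0 and type BLKIO_STAT_READ the key
 * produced above is "8:0 Read"; with diskname_only set, it stops at
 * "8:0".
 */
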
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
				    enum stat_type_cpu type,
				    enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
				   struct cgroup_map_cb *cb, dev_t dev,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
			       struct cgroup_map_cb *cb, dev_t dev,
			       enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
		     blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn,
				      enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
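
/*
 * Illustrative example (assuming a device numbered 8:16 exists): writing
 * "8:16 1048576" to throttle.read_bps_device is parsed above as major 8,
 * minor 16 and a 1048576 bytes/sec limit stored in newpn->val.bps.
 */
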
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
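
/*
 * Illustrative example: writing "8:16 0" to one of the rule files is thus
 * interpreted as "delete the stored rule for device 8:16", not as a rule
 * whose value is zero.
 */
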
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
				     struct blkio_group *blkg,
				     struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
			 blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
					  struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
					 struct blkio_cgroup *blkcg,
					 struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
								   blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
							       blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
					      BLKIO_POLICY_PROP,
					      BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
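
/*
 * Illustrative usage, assuming the blkio controller is mounted at
 * /sys/fs/cgroup/blkio (the value must lie in [BLKIO_WEIGHT_MIN,
 * BLKIO_WEIGHT_MAX]):
 *
 *	echo 500 > /sys/fs/cgroup/blkio/<group>/blkio.weight
 *
 * Groups with a per-device blkio.weight_device rule keep that rule;
 * all others pick up the new default weight.
 */
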
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
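
/*
 * Note: each cftype entry above shows up in the cgroup filesystem as a
 * file prefixed with the subsystem name, e.g. "weight" becomes
 * "blkio.weight", once blkiocg_populate() registers the array through
 * cgroup_add_files().
 */
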
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
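
/*
 * Note: the root cgroup reuses the statically allocated blkio_root_cgroup
 * (initialized above with weight 2*BLKIO_WEIGHT_DEFAULT), which is why
 * blkiocg_destroy() takes care not to kfree() it; child cgroups are
 * kzalloc()ed here with the default weight.
 */
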
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
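
/*
 * Illustrative sketch, assuming a hypothetical policy "foo": an IO
 * control policy fills in a struct blkio_policy_type with its plid and
 * ops callbacks and registers it at init time, after which the update
 * and unlink hooks earlier in this file reach it via blkio_list:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 */
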
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");