/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
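
/*
 * Illustrative example (not part of the original comments): the
 * "throttle.read_bps_device" cftype later in this file sets
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device);
 *
 * so BLKIOFILE_POLICY(cft->private) recovers BLKIO_POLICY_THROTL from
 * the high 16 bits and BLKIOFILE_ATTR(cft->private) recovers the file
 * id from the low 16 bits.
 */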

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
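
/*
 * Example (illustrative, not from the original source): accounting one
 * queued synchronous write bumps both the WRITE and the SYNC buckets of
 * the same stat array:
 *
 *	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1,
 *		       true, true);
 */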

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
		       direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q, plid);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
	if (new_blkg) {
		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

		spin_lock_init(&new_blkg->stats_lock);
		rcu_assign_pointer(new_blkg->q, q);
		new_blkg->blkcg = blkcg;
		new_blkg->plid = plid;
		cgroup_path(blkcg->css.cgroup, new_blkg->path,
			    sizeof(new_blkg->path));
	} else {
		css_put(&blkcg->css);
	}

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q, plid);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	pol->ops.blkio_link_group_fn(q, blkg);
	spin_unlock(&blkcg->lock);
out:
	if (new_blkg) {
		free_percpu(new_blkg->stats_cpu);
		kfree(new_blkg);
		css_put(&blkcg->css);
	}
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
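
/*
 * Illustrative call pattern (not from the original comments): callers
 * must hold both the RCU read lock and the queue lock, e.g. as
 * blkio_policy_parse_and_set() does below:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_THROTL, false);
 *	spin_unlock_irq(q->queue_lock);
 *
 * and must check the result with IS_ERR(); -EBUSY means the queue was
 * bypassing and the operation should be retried.
 */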

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg = blkg->blkcg;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
				    enum stat_type_cpu type,
				    enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
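
/*
 * Reader-side note (illustrative, not from the original comments): the
 * fetch_begin/fetch_retry loop above pairs with the
 * u64_stats_update_begin()/u64_stats_update_end() writers in
 * blkiocg_update_dispatch_stats() and blkiocg_update_io_merged_stats(),
 * so 64-bit counters read consistently on 32-bit SMP without a lock.
 */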

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
		     blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
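
/*
 * Illustrative input format (matches the parsing below, not part of the
 * original comments): each configuration file takes a single
 * "major:minor value" line, e.g. from userspace:
 *
 *	# limit reads on device 8:16 to 1 MB/s (device numbers assumed)
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * A value of 0 clears the per-device setting: weight falls back to the
 * cgroup-wide default and throttle limits become unlimited (-1).
 */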

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		blkg->conf.weight = temp;
		blkio_update_group_weight(blkg, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			blkg->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			blkg->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			blkg->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			blkg->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	const char *dname = blkg_dev_name(blkg);
	int fileid = BLKIOFILE_ATTR(cft->private);
	int rw = WRITE;

	if (!dname)
		return;

	switch (blkg->plid) {
	case BLKIO_POLICY_PROP:
		if (blkg->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, blkg->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (blkg->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, blkg->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (blkg->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, blkg->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
			blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);

		if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
							   type);
		else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->plid == plid && !blkg->conf.weight)
			blkio_update_group_weight(blkg, blkcg->weight);

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
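
/*
 * Note (illustrative, not from the original comments): blkcg->weight is
 * the cgroup-wide default; a non-zero per-device blkg->conf.weight set
 * via "weight_device" takes precedence, which is why only groups with
 * conf.weight == 0 are updated above.
 */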

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();

	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	rcu_read_unlock();

	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);