blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
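
/*
 * Notify the policy identified by @plid that the weight of @blkg has
 * changed.  Policies that do not own the group are skipped.
 */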
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}
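
/*
 * Propagate a new bytes-per-second limit for @blkg to the owning policy.
 * @fileid selects whether the read or the write limit is being updated.
 */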
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
							blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
							blkg, bps);
	}
}
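
/*
 * Propagate a new IOPS limit for @blkg to the owning policy.  As above,
 * @fileid selects the read or write direction.
 */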
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
							blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
							blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time)) {
			u64_stats_update_begin(&stats->syncp);
			stats->idle_time += now - stats->start_idle_time;
			u64_stats_update_end(&stats->syncp);
		}
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif
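
/*
 * Account a request being added to @blkg's queue.  Updates the QUEUED
 * counters and, under CONFIG_DEBUG_BLK_CGROUP, the empty/group-wait
 * bookkeeping.  Must be called with the queue_lock held.
 */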
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
	blkio_end_empty_time(stats);
	u64_stats_update_end(&stats->syncp);

	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
				 sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	stats->unaccounted_time += unaccounted_time;
#endif
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Update the number of merged requests; called with queue_lock held. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
						msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
						alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}
		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
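
/*
 * Look up the blkg for @blkcg on @q, creating and linking a new one if it
 * doesn't exist yet.  Expects the caller to hold the rcu read lock and the
 * queue_lock; may return ERR_PTR() for a bypassing or dead queue, a dying
 * blkcg, or allocation failure.
 */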
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
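
/*
 * Unlink @blkg from its queue and cgroup lists and drop the reference
 * taken at creation time.  Both the queue_lock and the blkcg lock must
 * be held.
 */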
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		sc->sectors = 0;
		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
	}
}
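
/*
 * Handler for the "reset_stats" cgroup file.  Clears the accumulated
 * statistics of every group in the cgroup for all registered policies
 * (queued counts are preserved).
 */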
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
				if (i != BLKIO_STAT_QUEUED)
					memset(stats->stat_arr[i], 0,
					       sizeof(stats->stat_arr[i]));
			stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
			       BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
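
/*
 * Build the key string ("<dev> Read", "<dev> Write", ...) used when
 * filling stats into a cgroup map file.
 */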
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		unsigned int start;

		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, val);
		return val;
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
	uint64_t v = 0, disk_total = 0;
	char key_str[MAX_KEY_LEN];
	unsigned int sync_start;
	int st;

	if (type >= BLKIO_STAT_ARR_NR) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			switch (type) {
			case BLKIO_STAT_TIME:
				v = stats->time;
				break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			case BLKIO_STAT_UNACCOUNTED_TIME:
				v = stats->unaccounted_time;
				break;
			case BLKIO_STAT_AVG_QUEUE_SIZE: {
				uint64_t samples = stats->avg_queue_size_samples;

				if (samples) {
					v = stats->avg_queue_size_sum;
					do_div(v, samples);
				}
				break;
			}
			case BLKIO_STAT_IDLE_TIME:
				v = stats->idle_time;
				break;
			case BLKIO_STAT_EMPTY_TIME:
				v = stats->empty_time;
				break;
			case BLKIO_STAT_DEQUEUE:
				v = stats->dequeue;
				break;
			case BLKIO_STAT_GROUP_WAIT_TIME:
				v = stats->group_wait_time;
				break;
#endif
			default:
				WARN_ON_ONCE(1);
			}
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, v);
		return v;
	}

	for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			v = stats->stat_arr[type][st];
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, v);
		if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
			disk_total += v;
	}

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
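
/*
 * Parse a "major:minor value" configuration string written to one of the
 * weight/throttle device files and apply it to the corresponding blkg,
 * creating the group if necessary.
 */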
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many arguments */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
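
/*
 * Print the per-device configuration of @blkg for the file identified by
 * @cft.  Note the intentional switch fall-through below: the read_* cases
 * set rw to READ and fall into the write_* cases which do the printing.
 */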
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
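
/*
 * Walk all groups of @blkcg and emit the requested stat for each device,
 * optionally followed by a cgroup-wide "Total" line.
 */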
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		else
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
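
/*
 * Handler for writes to the "weight" file.  Updates the cgroup default
 * weight and pushes it to every group that has no per-device override.
 */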
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup. blkgs should be
 * removed while holding both q and blkcg locks. As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
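
/*
 * Bypass all queues and shoot down their non-root groups so that policy
 * (un)registration below can proceed safely.  blkcg_bypass_start() leaves
 * all_q_mutex held; blkcg_bypass_end() releases it.
 */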
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
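
/*
 * Register/unregister a blkio policy.  The queues are put into bypass
 * mode while the policy array and the root blkgs' policy data are updated.
 */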
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);