blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}

struct blkio_group *bio_blkio_cgroup(struct bio *bio);
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
        if (bio && bio->bi_css)
                return container_of(bio->bi_css, struct blkio_cgroup, css);
        return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

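/*
 * Illustrative sketch: resolving the blkcg a bio should be charged to.
 * An explicit association via bio->bi_css wins; otherwise the submitting
 * task's cgroup is used.  The bio, blkcg and surrounding context here are
 * placeholders, not code from this file.
 */
#if 0
struct blkio_cgroup *blkcg;

rcu_read_lock();
blkcg = bio_blkio_cgroup(bio);          /* never returns NULL */
/* ... look up or create the matching blkg under the queue lock ... */
rcu_read_unlock();
#endif
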
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                            struct blkio_policy_type *pol,
                                            struct blkio_group *curr_blkg)
{
        struct blkg_policy_data *pd = blkg->pd[pol->plid];

        if (blkio_blkg_waiting(&pd->stats))
                return;
        if (blkg == curr_blkg)
                return;
        pd->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
                                        struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);
        BUG_ON(blkio_blkg_idling(stats));

        stats->start_idle_time = sched_clock();
        blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        if (blkio_blkg_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                blkio_clear_blkg_idling(stats);
        }
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
                                         struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_sum(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
                                  struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        if (blkg_rwstat_sum(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  struct blkio_policy_type *pol,
                                  unsigned long dequeue)
{
        struct blkg_policy_data *pd = blkg->pd[pol->plid];

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                        struct blkio_policy_type *pol,
                                        struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_policy_type *pol,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_rwstat_add(&stats->queued, rw, 1);
        blkio_end_empty_time(stats);
        blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol,
                                    bool direction, bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol,
                                   unsigned long time,
                                   unsigned long unaccounted_time)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol,
                                   uint64_t bytes, bool direction, bool sync)
{
        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
        struct blkg_policy_data *pd = blkg->pd[pol->plid];
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /* If per cpu stats are not allocated yet, don't do any accounting. */
        if (pd->stats_cpu == NULL)
                return;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(pd->stats_cpu);
        blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
                                     struct blkio_policy_type *pol,
                                     uint64_t start_time,
                                     uint64_t io_start_time, bool direction,
                                     bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
        unsigned long long now = sched_clock();
        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

        lockdep_assert_held(blkg->q->queue_lock);

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, rw,
                                io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are updated under the queue_lock, not per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol,
                                    bool direction, bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

        lockdep_assert_held(blkg->q->queue_lock);

        blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

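/*
 * Illustrative sketch: roughly where an elevator such as CFQ would call the
 * stat update hooks above over a request's life.  blkg, pol, curr_blkg, rq
 * and the recorded timestamps come from the scheduler's own context and are
 * placeholders here.
 */
#if 0
u64 queue_ns, dispatch_ns;      /* timestamps the scheduler records */

/* request queued on the group, queue_lock held */
blkiocg_update_io_add_stats(blkg, pol, curr_blkg, rq_data_dir(rq),
                            rq_is_sync(rq));

/* request handed to the driver */
blkiocg_update_io_remove_stats(blkg, pol, rq_data_dir(rq), rq_is_sync(rq));
blkiocg_update_dispatch_stats(blkg, pol, blk_rq_bytes(rq),
                              rq_data_dir(rq), rq_is_sync(rq));

/* request completed */
blkiocg_update_completion_stats(blkg, pol, queue_ns, dispatch_ns,
                                rq_data_dir(rq), rq_is_sync(rq));
#endif
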
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
        static void *pcpu_stats[BLKIO_NR_POLICIES];
        struct delayed_work *dwork = to_delayed_work(work);
        struct blkio_group *blkg;
        int i;
        bool empty = false;

alloc_stats:
        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                if (pcpu_stats[i] != NULL)
                        continue;

                pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

                /* Allocation failed. Try again after some time. */
                if (pcpu_stats[i] == NULL) {
                        queue_delayed_work(system_nrt_wq, dwork,
                                           msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&blkio_list_lock);
        spin_lock(&alloc_list_lock);

        /* cgroup got deleted or queue exited. */
        if (!list_empty(&alloc_list)) {
                blkg = list_first_entry(&alloc_list, struct blkio_group,
                                        alloc_node);
                for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                        struct blkg_policy_data *pd = blkg->pd[i];

                        if (blkio_policy[i] && pd && !pd->stats_cpu)
                                swap(pd->stats_cpu, pcpu_stats[i]);
                }
                list_del_init(&blkg->alloc_node);
        }

        empty = list_empty(&alloc_list);

        spin_unlock(&alloc_list_lock);
        spin_unlock_irq(&blkio_list_lock);

        if (!empty)
                goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkg_policy_data *pd = blkg->pd[i];

                if (pd) {
                        free_percpu(pd->stats_cpu);
                        kfree(pd);
                }
        }

        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
                                      struct request_queue *q)
{
        struct blkio_group *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        INIT_LIST_HEAD(&blkg->alloc_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];
                struct blkg_policy_data *pd;

                if (!pol)
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
                                  q->node);
                if (!pd) {
                        blkg_free(blkg);
                        return NULL;
                }

                blkg->pd[i] = pd;
                pd->blkg = blkg;
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];

                if (pol)
                        pol->ops.blkio_init_group_fn(blkg);
        }

        return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q,
                                       bool for_root)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkio_group *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         * The following can be removed if blkg lookup is guaranteed to
         * fail on a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)) && !for_root)
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

        blkg = blkg_lookup(blkcg, q);
        if (blkg)
                return blkg;

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /* allocate and initialize */
        blkg = blkg_alloc(blkcg, q);

        /* did alloc fail? */
        if (unlikely(!blkg)) {
                blkg = ERR_PTR(-ENOMEM);
                goto out;
        }

        /* insert */
        spin_lock(&blkcg->lock);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        list_add(&blkg->q_node, &q->blkg_list);
        spin_unlock(&blkcg->lock);

        spin_lock(&alloc_list_lock);
        list_add(&blkg->alloc_node, &alloc_list);
        /* Queue per cpu stat allocation from worker thread. */
        queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
        spin_unlock(&alloc_list_lock);
out:
        return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

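/*
 * Illustrative sketch, mirroring the pattern blkg_conf_prep() below uses:
 * callers must hold both rcu_read_lock() and the queue lock, and must be
 * prepared for an ERR_PTR() return while the queue is bypassing.  blkcg
 * and q are placeholders from the caller's context.
 */
#if 0
struct blkio_group *blkg;

rcu_read_lock();
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(blkcg, q, false);
spin_unlock_irq(q->queue_lock);

if (IS_ERR(blkg)) {
        rcu_read_unlock();
        return PTR_ERR(blkg);   /* -EBUSY means "bypassing, retry later" */
}
/* ... use blkg ... */
rcu_read_unlock();
#endif
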
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                struct request_queue *q)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->q == q)
                        return blkg;
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
        struct request_queue *q = blkg->q;
        struct blkio_cgroup *blkcg = blkg->blkcg;

        lockdep_assert_held(q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        spin_lock(&alloc_list_lock);
        list_del_init(&blkg->alloc_node);
        spin_unlock(&alloc_list_lock);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
        struct blkio_policy_type *pol = blkio_policy[plid];
        struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
        struct blkg_policy_data *pd;

        if (!blkg)
                return;

        kfree(blkg->pd[plid]);
        blkg->pd[plid] = NULL;

        if (!pol)
                return;

        pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
        WARN_ON_ONCE(!pd);

        pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
        WARN_ON_ONCE(!pd->stats_cpu);

        blkg->pd[plid] = pd;
        pd->blkg = blkg;
        pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
        struct blkio_group *blkg, *n;

        spin_lock_irq(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkio_cgroup *blkcg = blkg->blkcg;

                /* skip root? */
                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
                        continue;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in rcu manner. But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid. For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

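/*
 * Reader sketch (illustrative): since the group is freed via RCU after the
 * last blkg_put(), lockless readers bracket accesses to group-local stats
 * with rcu_read_lock().  blkcg, q and plid are placeholders from the
 * caller's context.
 */
#if 0
struct blkio_group *blkg;
u64 v;

rcu_read_lock();
blkg = blkg_lookup(blkcg, q);
if (blkg)
        v = blkg_stat_read(&blkg->pd[plid]->stats.time);
rcu_read_unlock();
#endif
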
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
        struct blkg_policy_data *pd = blkg->pd[plid];
        int cpu;

        if (pd->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct blkio_group_stats_cpu *sc =
                        per_cpu_ptr(pd->stats_cpu, cpu);

                blkg_rwstat_reset(&sc->service_bytes);
                blkg_rwstat_reset(&sc->serviced);
                blkg_stat_reset(&sc->sectors);
        }
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates. This is a debug feature which shouldn't exist
         * anyway. If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                struct blkio_policy_type *pol;

                list_for_each_entry(pol, &blkio_list, list) {
                        struct blkg_policy_data *pd = blkg->pd[pol->plid];
                        struct blkio_group_stats *stats = &pd->stats;

                        /* queued stats shouldn't be cleared */
                        blkg_rwstat_reset(&stats->merged);
                        blkg_rwstat_reset(&stats->service_time);
                        blkg_rwstat_reset(&stats->wait_time);
                        blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                        blkg_stat_reset(&stats->unaccounted_time);
                        blkg_stat_reset(&stats->avg_queue_size_sum);
                        blkg_stat_reset(&stats->avg_queue_size_samples);
                        blkg_stat_reset(&stats->dequeue);
                        blkg_stat_reset(&stats->group_wait_time);
                        blkg_stat_reset(&stats->idle_time);
                        blkg_stat_reset(&stats->empty_time);
#endif
                        blkio_reset_stats_cpu(blkg, pol->plid);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data. If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       int pol, int data, bool show_total)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->pd[pol])
                        total += prfill(sf, blkg->pd[pol], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

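/*
 * Illustrative sketch: a policy-side read function built on
 * blkcg_print_blkgs(), equivalent in spirit to blkg_prfill_stat() below.
 * example_prfill(), example_read() and the BLKIO_POLICY_PROP plid choice
 * are placeholders, not part of this file's API.
 */
#if 0
static u64 example_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
                          int off)
{
        /* print one u64 picked out of the policy's stats by @off */
        return __blkg_prfill_u64(sf, pd,
                                 blkg_stat_read((void *)&pd->stats + off));
}

static int example_read(struct cgroup *cgrp, struct cftype *cft,
                        struct seq_file *sf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

        blkcg_print_blkgs(sf, blkcg, example_prfill, BLKIO_POLICY_PROP,
                          offsetof(struct blkio_group_stats, time), false);
        return 0;
}
#endif
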
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}

static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
                            int off)
{
        return __blkg_prfill_u64(sf, pd,
                                 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
                     struct seq_file *sf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

        blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
                          BLKCG_STAT_POL(cft->private),
                          BLKCG_STAT_OFF(cft->private), false);
        return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
                       struct seq_file *sf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
                          BLKCG_STAT_POL(cft->private),
                          BLKCG_STAT_OFF(cft->private), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);

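/*
 * Illustrative sketch: wiring a stat file to these printers.  The printers
 * decode cftype->private with BLKCG_STAT_POL()/BLKCG_STAT_OFF(), so the
 * file is expected to pack the policy id and the stat's offset with
 * BLKCG_STAT_PRIV() (defined in blk-cgroup.h).  The file name and plid
 * below are placeholders.
 */
#if 0
static struct cftype example_stat_files[] = {
        {
                .name = "example.io_service_time",
                .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
                                offsetof(struct blkio_group_stats,
                                         service_time)),
                .read_seq_string = blkcg_print_rwstat,
        },
        { }     /* terminate */
};
#endif
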
static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
                                struct blkg_policy_data *pd, int off)
{
        u64 v = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blkio_group_stats_cpu *sc =
                        per_cpu_ptr(pd->stats_cpu, cpu);

                v += blkg_stat_read((void *)sc + off);
        }

        return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                struct blkio_group_stats_cpu *sc =
                        per_cpu_ptr(pd->stats_cpu, cpu);

                tmp = blkg_rwstat_read((void *)sc + off);
                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        rwstat.cnt[i] += tmp.cnt[i];
        }

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
                         struct seq_file *sf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

        blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
                          BLKCG_STAT_POL(cft->private),
                          BLKCG_STAT_OFF(cft->private), false);
        return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
                           struct seq_file *sf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

        blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
                          BLKCG_STAT_POL(cft->private),
                          BLKCG_STAT_OFF(cft->private), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
                   struct blkg_conf_ctx *ctx)
        __acquires(rcu)
{
        struct gendisk *disk;
        struct blkio_group *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();

        spin_lock_irq(disk->queue->queue_lock);
        blkg = blkg_lookup_create(blkcg, disk->queue, false);
        spin_unlock_irq(disk->queue->queue_lock);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry. Do so after a
                 * short msleep(). It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(rcu)
{
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

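/*
 * Illustrative sketch: a per-device config write handler built on the
 * prep/finish pair above, as the blkg_conf_prep() comment prescribes.
 * example_set_conf() is a placeholder name, not part of this file.
 */
#if 0
static int example_set_conf(struct cgroup *cgrp, struct cftype *cft,
                            const char *buf)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
        struct blkg_conf_ctx ctx;
        int ret;

        ret = blkg_conf_prep(blkcg, buf, &ctx);
        if (ret)
                return ret;

        /* ctx.blkg is the target group, ctx.v the parsed value */
        /* ... apply ctx.v to ctx.blkg's policy data here ... */

        blkg_conf_finish(&ctx);
        return 0;
}
#endif
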
struct cftype blkio_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup. blkgs
 * should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
                                                struct blkio_group, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        int ret;

        might_sleep();

        ret = blk_throtl_init(q);
        if (ret)
                return ret;

        mutex_lock(&all_q_mutex);
        INIT_LIST_HEAD(&q->all_q_node);
        list_add_tail(&q->all_q_node, &all_q_list);
        mutex_unlock(&all_q_mutex);

        return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        mutex_lock(&all_q_mutex);
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);

        blkg_destroy_all(q, true);

        blk_throtl_exit(q);
}

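/*
 * Lifecycle summary (from the kernel-doc comments above): the block core
 * invokes the three hooks in this order over a queue's life, with
 * blkcg_drain_queue() called under the queue lock:
 *
 *   blk_alloc_queue_node() -> blkcg_init_queue(q)
 *   blk_drain_queue()      -> blkcg_drain_queue(q)
 *   blk_release_queue()    -> blkcg_exit_queue(q)
 */
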
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkcg_bypass_start(void)
        __acquires(&all_q_mutex)
{
        struct request_queue *q;

        mutex_lock(&all_q_mutex);

        list_for_each_entry(q, &all_q_list, all_q_node) {
                blk_queue_bypass_start(q);
                blkg_destroy_all(q, false);
        }
}

static void blkcg_bypass_end(void)
        __releases(&all_q_mutex)
{
        struct request_queue *q;

        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_queue_bypass_end(q);

        mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .pre_destroy = blkiocg_pre_destroy,
        .destroy = blkiocg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkio_files,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;

        blkcg_bypass_start();
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid]);
        blkio_policy[blkiop->plid] = blkiop;
        list_add_tail(&blkiop->list, &blkio_list);

        spin_unlock(&blkio_list_lock);
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop->plid);
        blkcg_bypass_end();

        if (blkiop->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;

        if (blkiop->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

        blkcg_bypass_start();
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid] != blkiop);
        blkio_policy[blkiop->plid] = NULL;
        list_del_init(&blkiop->list);

        spin_unlock(&blkio_list_lock);
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop->plid);
        blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

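/*
 * Registration sketch (illustrative): how a policy module would plug into
 * the register/unregister hooks above.  All "example_*" names, the
 * BLKIO_POLICY_PROP plid and the field choices are placeholders; consult
 * struct blkio_policy_type in blk-cgroup.h for the authoritative layout.
 */
#if 0
static struct blkio_policy_type example_policy = {
        .ops = {
                .blkio_init_group_fn = example_init_group,
        },
        .plid = BLKIO_POLICY_PROP,
        .pdata_size = sizeof(struct example_pdata),
        .cftypes = example_stat_files,
};

static int __init example_init(void)
{
        blkio_policy_register(&example_policy);
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
        blkio_policy_unregister(&example_policy);
}
module_exit(example_exit);
#endif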