blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
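/*
 * Illustration (not from the original source): BLKIOFILE_PRIVATE(
 * BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device) packs the policy id
 * into the upper 16 bits of cft->private and the per-file attribute into
 * the lower 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() later recover
 * the two halves so one read/write handler can service many cgroup files.
 */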
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								   blkg, bps);
		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								    blkg, iops);
		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								     blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
		stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	blkg->stats.unaccounted_time += unaccounted_time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
		       sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
		       direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
		       sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev,
			     enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
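/*
 * Illustration (not from the original source): for dev 8:16,
 * blkio_get_key_name() produces keys such as "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async" and "8:16 Total"; with diskname_only set it
 * stops at "8:16".
 */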
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
		     blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
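/*
 * Sketch of the resulting user-visible map output (values illustrative),
 * e.g. when reading blkio.io_service_bytes for a group on dev 8:16:
 *
 *	8:16 Read 1310720
 *	8:16 Write 0
 *	8:16 Sync 1310720
 *	8:16 Async 0
 *	8:16 Total 1310720
 *
 * The cgroup-wide "Total" line is appended separately by
 * blkio_read_blkg_stats() below when show_total is set.
 */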
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
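/*
 * Expected input format, as derived from the parser above: two
 * whitespace-separated tokens, "<major>:<minor> <value>". For example,
 * "8:16 1000" sets a weight of 1000 (proportional policy) or a limit of
 * 1000 bytes/sec or IOs/sec (throttle policy) on device 8:16.
 */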
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				      BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				      BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked to delete a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
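/*
 * Note (derived from the check above): writing a value of 0 for an
 * existing device rule is treated as a request to delete that rule,
 * not as a zero weight/limit.
 */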
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
			 blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
					  struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
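/*
 * Example usage from userspace (illustrative; assumes the blkio controller
 * is mounted at /sys/fs/cgroup/blkio and a child cgroup "grp" exists):
 *
 *	# limit reads from device 8:16 to 1 MB/s for this cgroup
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 *
 *	# delete the rule again by writing a value of 0
 *	echo "8:16 0" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 */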
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
		struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
		bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						       type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_UNACCOUNTED_TIME, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
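/*
 * Note (derived from the loop above): the cgroup-wide weight written to
 * blkio.weight is only pushed to groups that carry no per-device
 * blkio.weight_device rule; a device rule always overrides the cgroup
 * default.
 */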
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
					     BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
					     BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");