sched_fair.c

/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;
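
/*
 * Illustrative arithmetic for the SMP scaling described above (a
 * sketch, not another tunable): with the 20ms default and a factor of
 * 1 + log2(nr_cpus), a 2-way box targets 2 * 20ms = 40ms, a 4-way box
 * 60ms and an 8-way box 80ms of preemption latency.
 */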
/*
 * After fork, child runs first (default). If set to 0 then the
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;
/*
 * Number of tasks a single latency period accommodates before the
 * period is stretched; the effective minimal preemption granularity
 * for CPU-bound tasks is sysctl_sched_latency / sysctl_sched_nr_latency
 * (default: 20ms / 20 = 1ms).
 */
const_debug unsigned int sysctl_sched_nr_latency = 20;
/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;
/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 2 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
static inline u64
max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64
min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}
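
/*
 * A note on the signed-subtraction trick above (a worked illustration):
 * comparing via (s64)(vruntime - min_vruntime) stays correct even when
 * the u64 clocks wrap. E.g. in 8-bit arithmetic, 2 - 250 is 8 as an
 * unsigned value, i.e. +8 when reinterpreted as signed, so 2 is
 * correctly treated as being "after" 250.
 */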
static inline s64
entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct sched_entity *se = NULL;
        struct rb_node *parent;

        while (*link) {
                parent = *link;
                se = rb_entry(parent, struct sched_entity, run_node);
                link = &parent->rb_right;
        }

        return se;
}
/**************************************************************
 * Scheduling class statistics methods:
 */

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sysctl_sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period *= nr_running;
                do_div(period, nr_latency);
        }

        return period;
}
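
/*
 * Worked example for the defaults above (illustrative): with l = 20ms
 * and nl = 20, up to 20 runnable tasks share one 20ms period; with 40
 * runnable tasks the period stretches to 20ms * 40/20 = 40ms, which
 * keeps each equal-weight slice at the 1ms granularity.
 */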
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running);

        slice *= se->load.weight;
        do_div(slice, cfs_rq->load.weight);

        return slice;
}
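
/*
 * Worked example (illustrative): in a 20ms period shared by a nice-0
 * task (load.weight 1024) and a nice-5 task (load.weight 335),
 * rw = 1359, so the nice-0 task gets 20ms * 1024/1359 ~= 15ms of wall
 * time per period and the nice-5 task the remaining ~5ms.
 */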
/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
        u64 vslice = __sched_period(nr_running);

        do_div(vslice, rq_weight);

        return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
        return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
                              cfs_rq->nr_running + 1);
}
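
/*
 * A note on vs = p/rw (illustrative reasoning, not extra policy): the
 * period is divided by the total runqueue weight rather than by the
 * entity's own weight, so every entity is entitled to the same amount
 * of virtual time per period - heavier tasks get longer wall-clock
 * slices, but their vruntime advances proportionally slower, and the
 * two effects cancel.
 */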
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;
        u64 vruntime;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = delta_exec;
        if (unlikely(curr->load.weight != NICE_0_LOAD)) {
                delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
                                                      &curr->load);
        }
        curr->vruntime += delta_exec_weighted;

        /*
         * Maintain cfs_rq->min_vruntime as a monotonically increasing
         * value tracking the leftmost vruntime in the tree.
         */
        if (first_fair(cfs_rq)) {
                vruntime = min_vruntime(curr->vruntime,
                                __pick_next_entity(cfs_rq)->vruntime);
        } else
                vruntime = curr->vruntime;

        cfs_rq->min_vruntime =
                max_vruntime(cfs_rq->min_vruntime, vruntime);
}
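
/*
 * Worked example for the weighting above (illustrative): a nice-0 task
 * (weight == NICE_0_LOAD) accrues vruntime at wall-clock rate, while a
 * task of twice that weight accrues it at half rate - 10ms of execution
 * adds only 5ms of vruntime - so it runs twice as long before the tree
 * considers it "ahead".
 */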
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
        unsigned long weight = se->load.weight;

        if (unlikely(weight != NICE_0_LOAD))
                return (u64)delta * weight >> NICE_0_SHIFT;
        else
                return delta;
}
/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
        schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_curr(cfs_rq);
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->exec_start = 0;
}
/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                /*
                 * Blocking time is in units of nanosecs, so shift by 20
                 * (2^20 ns ~= 1.05 ms) to get a milliseconds-range
                 * estimation of the amount of time that the task spent
                 * sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {
                        struct task_struct *tsk = task_of(se);

                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                     delta >> 20);
                }
        }
#endif
}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime;

        vruntime = cfs_rq->min_vruntime;

        if (sched_feat(USE_TREE_AVG)) {
                struct sched_entity *last = __pick_last_entity(cfs_rq);
                if (last) {
                        vruntime += last->vruntime;
                        vruntime >>= 1;
                }
        } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
                vruntime += sched_vslice(cfs_rq)/2;

        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice_add(cfs_rq, se);

        if (!initial) {
                if (sched_feat(NEW_FAIR_SLEEPERS))
                        vruntime -= sysctl_sched_latency;

                vruntime = max_t(s64, vruntime, se->vruntime);
        }

        se->vruntime = vruntime;
}
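
/*
 * How the placement above plays out (a sketch of the heuristics, not
 * additional policy): START_DEBIT charges a newly forked entity one
 * extra vruntime slice, so frequent forkers cannot flood the left end
 * of the tree; NEW_FAIR_SLEEPERS credits a waking sleeper up to one
 * latency period, and the max_t() ensures a task that slept only
 * briefly keeps its own larger vruntime instead of pocketing the full
 * sleeper bonus.
 */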
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update the fair clock.
         */
        update_curr(cfs_rq);

        if (wakeup) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime)
                resched_task(rq_of(cfs_rq)->curr);
}
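
/*
 * Worked example (illustrative): with two runnable nice-0 tasks and
 * the 20ms default latency, sched_slice() comes to 10ms each; once the
 * running task has executed 10ms beyond prev_sum_exec_runtime, the
 * next tick marks it for rescheduling in favour of the leftmost entity.
 */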
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);

        set_next_entity(cfs_rq, se);

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        update_stats_curr_end(cfs_rq, prev);
        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        if (cfs_rq->nr_running > 1)
                check_preempt_tick(cfs_rq, curr);
}
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up the scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq
 * on another cpu ('this_cpu'):
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rqs on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */
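
/*
 * Illustrative shape of the hierarchy with group scheduling enabled
 * (a sketch, not additional code): a task T in group G on CPU c is a
 * sched_entity queued on G's per-cpu cfs_rq, while G itself has a
 * sched_entity queued on the CPU's top-level cfs_rq. The
 * for_each_sched_entity() walk T -> G -> NULL via ->parent is what
 * lets the enqueue/dequeue/tick paths below operate on every level.
 */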
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
                sleep = 1;
        }
}
/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
        struct sched_entity *rightmost, *se = &rq->curr->se;

        /*
         * Are we the only task in the tree?
         */
        if (unlikely(cfs_rq->nr_running == 1))
                return;

        if (likely(!sysctl_sched_compat_yield)) {
                __update_rq_clock(rq);
                /*
                 * Dequeue and enqueue the task to update its
                 * position within the tree:
                 */
                update_curr(cfs_rq);

                return;
        }
        /*
         * Find the rightmost entry in the rbtree:
         */
        rightmost = __pick_last_entity(cfs_rq);
        /*
         * Already in the rightmost position?
         */
        if (unlikely(rightmost->vruntime < se->vruntime))
                return;

        /*
         * Minimally necessary key value to be last in the tree:
         * Upon rescheduling, sched_class::put_prev_task() will place
         * 'current' within the tree based on its new key value.
         */
        se->vruntime = rightmost->vruntime + 1;
}
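
/*
 * A note on the compat path above (reasoning only): bumping
 * se->vruntime past the rightmost entity means put_prev_entity() will
 * re-link 'current' at the far right of the timeline, so every other
 * runnable task gets to run before the yielding one - the aggressive
 * O(1)-scheduler-style yield semantics some userspace still expects.
 */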
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        struct sched_entity *se = &curr->se, *pse = &p->se;
        s64 delta;

        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr(cfs_rq);
                resched_task(curr);
                return;
        }

        while (!is_same_group(se, pse)) {
                se = parent_entity(se);
                pse = parent_entity(pse);
        }

        delta = se->vruntime - pse->vruntime;

        if (delta > (s64)sysctl_sched_wakeup_granularity)
                resched_task(curr);
}
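
/*
 * Worked example (illustrative): with the 2ms wakeup granularity
 * above, a waking task preempts current only once current's vruntime
 * is more than 2ms ahead of the waker's; the dead-band keeps a pair of
 * tasks that wake each other frequently from preempting on every
 * single wakeup.
 */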
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (unlikely(!cfs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_entity(cfs_rq);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);

        return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
        struct task_struct *p;

        if (!curr)
                return NULL;

        p = rb_entry(curr, struct task_struct, se.run_node);
        cfs_rq->rb_load_balance_curr = rb_next(curr);

        return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr;
        struct task_struct *p;

        if (!cfs_rq->nr_running)
                return MAX_PRIO;

        curr = cfs_rq->curr;
        if (!curr)
                curr = __pick_next_entity(cfs_rq);

        p = task_of(curr);

        return p->prio;
}
#endif
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_nr_move, unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        struct cfs_rq *busy_cfs_rq;
        unsigned long load_moved, total_nr_moved = 0, nr_moved;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
                long imbalance;
                unsigned long maxload;

                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

                imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;

                /* Don't pull more than imbalance/2 */
                imbalance /= 2;
                maxload = min(rem_load_move, imbalance);

                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
                /*
                 * Pass the busy_cfs_rq argument into the
                 * load_balance_[start|next]_fair iterators:
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
                                max_nr_move, maxload, sd, idle, all_pinned,
                                &load_moved, this_best_prio, &cfs_rq_iterator);

                total_nr_moved += nr_moved;
                max_nr_move -= nr_moved;
                rem_load_move -= load_moved;

                if (max_nr_move <= 0 || rem_load_move <= 0)
                        break;
        }

        return max_load_move - rem_load_move;
}
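
/*
 * Worked example for the imbalance/2 cap above (illustrative): if the
 * busiest CPU's group cfs_rq carries weight 3072 and ours 1024, the
 * imbalance is 2048; pulling at most 1024 equalizes the two queues,
 * while pulling the full difference would overshoot and invite the
 * load to ping-pong back on the next balancing pass.
 */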
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se);
        }
}
#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure on the CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;

        sched_info_queued(p);

        update_curr(cfs_rq);
        place_entity(cfs_rq, se, 1);

        if (sysctl_sched_child_runs_first && curr->vruntime < se->vruntime) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        check_spread(cfs_rq, curr);
        __enqueue_entity(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
        resched_task(rq->curr);
}
/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se)
                set_next_entity(cfs_rq_of(se), se);
}
/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
        .next                   = &idle_sched_class,
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,

        .check_preempt_curr     = check_preempt_wakeup,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

        .load_balance           = load_balance_fair,

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_new               = task_new_fair,
};
#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
}
#endif
  901. #endif