/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 */
/*
 * Preemption granularity:
 * (default: 2 msec, units: nanoseconds)
 *
 * NOTE: this granularity value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS will typically be somewhat
 * larger than this value. (to see the precise effective timeslice
 * length of your workload, run vmstat and monitor the context-switches
 * field)
 *
 * On SMP systems this value is multiplied by 1 + log2 of the number
 * of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way systems,
 * 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
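
/*
 * Illustrative sketch only (an assumption added for clarity, not part
 * of the scheduler proper - the real scaling presumably happens once
 * at boot, in sched_init_granularity()): the SMP factor described
 * above is 1 + log2(number of CPUs), so e.g. 8 CPUs yield 4x.
 */
static inline unsigned int example_smp_granularity(unsigned int nr_cpus)
{
	unsigned int factor = 1;	/* accumulates 1 + log2(nr_cpus) */

	while (nr_cpus >>= 1)
		factor++;

	return sysctl_sched_granularity * factor;
}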
/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
							10000000000ULL/HZ;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;

unsigned int sysctl_sched_stat_granularity __read_mostly;
/*
 * Initialized in sched_init_granularity():
 */
unsigned int sysctl_sched_runtime_limit __read_mostly;

/*
 * Debugging: various feature bits
 */
enum {
	SCHED_FEAT_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_SLEEPER_AVG		= 2,
	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
	SCHED_FEAT_START_DEBIT		= 16,
	SCHED_FEAT_SKIP_INITIAL		= 32,
};

unsigned int sysctl_sched_features __read_mostly =
		SCHED_FEAT_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_SLEEPER_AVG		*1 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_SKIP_INITIAL		*0;
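
/*
 * Note: the *1 / *0 multipliers above simply switch each feature's
 * default on or off - every feature except SKIP_INITIAL defaults to
 * enabled. A feature test then reads, e.g.:
 *
 *	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
 *		...
 */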
extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* currently running entity (if any) on this cfs_rq */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
	return cfs_rq->curr;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->curr = se;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);

	if (unlikely(rq->curr->sched_class != &fair_sched_class))
		return NULL;

	return &rq->curr->se;
}

#define entity_is_task(se)	1

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

/*
 * Enqueue an entity into the rb-tree:
 */
static inline void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = se->fair_key;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key - entry->fair_key < 0) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}
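
/*
 * Aside (an observation, not from the original comments): the key
 * comparison above is written as a signed subtraction,
 * key - entry->fair_key < 0, rather than key < entry->fair_key.
 * That orders entities by signed distance, so the tree ordering
 * presumably stays sane even across wraparound of the fair clock.
 */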
static inline void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

/*
 * We rescale the rescheduling granularity of tasks according to their
 * nice level, but only linearly, not exponentially:
 */
static long
niced_granularity(struct sched_entity *curr, unsigned long granularity)
{
	u64 tmp;

	/*
	 * Negative nice levels get the same granularity as nice-0:
	 */
	if (likely(curr->load.weight >= NICE_0_LOAD))
		return granularity;
	/*
	 * Positive nice level tasks get linearly finer
	 * granularity:
	 */
	tmp = curr->load.weight * (u64)granularity;

	/*
	 * It will always fit into 'long':
	 */
	return (long) (tmp >> NICE_0_SHIFT);
}
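
/*
 * Worked example (illustrative numbers): with NICE_0_LOAD == 1024 and
 * NICE_0_SHIFT == 10, a positive-nice task whose load weight is 512
 * (half of NICE_0_LOAD) gets (512 * granularity) >> 10, i.e. half the
 * granularity - so its preemption checks fire twice as readily.
 */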
static inline void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	long limit = sysctl_sched_runtime_limit;

	/*
	 * Niced tasks have the same history dynamic range as
	 * non-niced tasks:
	 */
	if (unlikely(se->wait_runtime > limit)) {
		se->wait_runtime = limit;
		schedstat_inc(se, wait_runtime_overruns);
		schedstat_inc(cfs_rq, wait_runtime_overruns);
	}
	if (unlikely(se->wait_runtime < -limit)) {
		se->wait_runtime = -limit;
		schedstat_inc(se, wait_runtime_underruns);
		schedstat_inc(cfs_rq, wait_runtime_underruns);
	}
}

static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	se->wait_runtime += delta;
	schedstat_add(se, sum_wait_runtime, delta);
	limit_wait_runtime(cfs_rq, se);
}
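
/*
 * Apply a wait_runtime delta while keeping the cfs_rq-wide
 * wait_runtime statistic consistent: remove the entity's old
 * contribution, apply the (possibly clamped) delta, then re-add
 * the new contribution.
 */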
static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
	__add_wait_runtime(cfs_rq, se, delta);
	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
{
	unsigned long delta, delta_exec, delta_fair, delta_mine;
	struct load_weight *lw = &cfs_rq->load;
	unsigned long load = lw->weight;

	delta_exec = curr->delta_exec;
	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	cfs_rq->exec_clock += delta_exec;

	if (unlikely(!load))
		return;

	delta_fair = calc_delta_fair(delta_exec, lw);
	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
					curr->load.weight, lw);
		if (unlikely(delta > cfs_rq->sleeper_bonus))
			delta = cfs_rq->sleeper_bonus;

		cfs_rq->sleeper_bonus -= delta;
		delta_mine -= delta;
	}

	cfs_rq->fair_clock += delta_fair;
	/*
	 * We executed delta_exec amount of time on the CPU,
	 * but we were only entitled to delta_mine amount of
	 * time during that period (if nr_running == 1 then
	 * the two values are equal)
	 * [Note: delta_mine - delta_exec is negative]:
	 */
	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
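
/*
 * Worked example (illustrative): with two runnable nice-0 tasks,
 * curr's share of the queue load is 1/2, so for a delta_exec of 2 ms
 * delta_mine is roughly 1 ms and curr's wait_runtime drops by about
 * 1 ms - the time it ran beyond its fair entitlement, which the
 * waiting task meanwhile accrues as fair-clock progress.
 */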
static void update_curr(struct cfs_rq *cfs_rq, u64 now)
{
	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	curr->delta_exec += delta_exec;

	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
		__update_curr(cfs_rq, curr, now);
		curr->delta_exec = 0;
	}
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	se->wait_start_fair = cfs_rq->fair_clock;
	schedstat_set(se->wait_start, now);
}

/*
 * We calculate fair deltas here, so protect against the random effects
 * of a multiplication overflow by capping it to the runtime limit:
 */
#if BITS_PER_LONG == 32
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
	u64 tmp = (u64)delta * weight >> shift;

	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
		return sysctl_sched_runtime_limit*2;

	return tmp;
}
#else
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
	return delta * weight >> shift;
}
#endif
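
/*
 * Why the 32-bit cap above matters (illustrative numbers): with a
 * delta near 2^31 ns (~2.1 s) and a high weight like 2^16, the
 * shifted product (2^31 * 2^16) >> 10 == 2^37 no longer fits the
 * 32-bit 'unsigned long' return type, so the intermediate is computed
 * in u64 and clamped to twice the runtime limit before truncation.
 */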
/*
 * Task is being enqueued - update stats:
 */
static void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	s64 key;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq_curr(cfs_rq))
		update_stats_wait_start(cfs_rq, se, now);
	/*
	 * Update the key:
	 */
	key = cfs_rq->fair_clock;

	/*
	 * Optimize the common nice 0 case:
	 */
	if (likely(se->load.weight == NICE_0_LOAD)) {
		key -= se->wait_runtime;
	} else {
		u64 tmp;

		if (se->wait_runtime < 0) {
			tmp = -se->wait_runtime;
			key += (tmp * se->load.inv_weight) >>
					(WMULT_SHIFT - NICE_0_SHIFT);
		} else {
			tmp = se->wait_runtime;
			key -= (tmp * se->load.weight) >> NICE_0_SHIFT;
		}
	}

	se->fair_key = key;
}
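
/*
 * Worked example (illustrative): a nice-0 task that is owed 1 ms of
 * CPU time (wait_runtime == +1000000) gets
 * fair_key = fair_clock - 1000000, so it sorts further left in the
 * timeline tree and is picked sooner; a task that overran its share
 * sorts further right by the same logic.
 */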
/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	unsigned long delta_fair = se->delta_fair_run;

	schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));

	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta_fair = calc_weighted(delta_fair, se->load.weight,
							NICE_0_SHIFT);

	add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	unsigned long delta_fair;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
			(u64)(cfs_rq->fair_clock - se->wait_start_fair));

	se->delta_fair_run += delta_fair;
	if (unlikely(abs(se->delta_fair_run) >=
				sysctl_sched_stat_granularity)) {
		__update_stats_wait_end(cfs_rq, se, now);
		se->delta_fair_run = 0;
	}

	se->wait_start_fair = 0;
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	update_curr(cfs_rq, now);
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq_curr(cfs_rq))
		update_stats_wait_end(cfs_rq, se, now);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = now;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
__enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	unsigned long load = cfs_rq->load.weight, delta_fair;
	long prev_runtime;

	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
		load = rq_of(cfs_rq)->cpu_load[2];

	delta_fair = se->delta_fair_sleep;

	/*
	 * Fix up delta_fair with the effect of us running
	 * during the whole sleep period:
	 */
	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
		delta_fair = div64_likely32((u64)delta_fair * load,
						load + se->load.weight);

	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta_fair = calc_weighted(delta_fair, se->load.weight,
							NICE_0_SHIFT);

	prev_runtime = se->wait_runtime;
	__add_wait_runtime(cfs_rq, se, delta_fair);
	delta_fair = se->wait_runtime - prev_runtime;

	/*
	 * Track the amount of bonus we've given to sleepers:
	 */
	cfs_rq->sleeper_bonus += delta_fair;

	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
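
/*
 * Worked example of the SLEEPER_AVG damping (illustrative): if a
 * nice-0 task wakes on a queue whose load is one other nice-0 task,
 * load / (load + se->load.weight) == 1024/2048, so only half of the
 * fair-clock delta accrued while sleeping is credited - as if the
 * sleeper had been sharing the CPU the whole time.
 */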
static void
enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	struct task_struct *tsk = task_of(se);
	unsigned long delta_fair;

	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
			!(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
		return;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));

	se->delta_fair_sleep += delta_fair;
	if (unlikely(abs(se->delta_fair_sleep) >=
				sysctl_sched_stat_granularity)) {
		__enqueue_sleeper(cfs_rq, se, now);
		se->delta_fair_sleep = 0;
	}

	se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = now - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = now - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;
	}
#endif
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
	       int wakeup, u64 now)
{
	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq, now);

	if (wakeup)
		enqueue_sleeper(cfs_rq, se, now);

	update_stats_enqueue(cfs_rq, se, now);
	__enqueue_entity(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
	       int sleep, u64 now)
{
	update_stats_dequeue(cfs_rq, se, now);
	if (sleep) {
		se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = now;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = now;
		}
		cfs_rq->wait_runtime -= se->wait_runtime;
#endif
	}
	__dequeue_entity(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
			  struct sched_entity *curr, unsigned long granularity)
{
	s64 __delta = curr->fair_key - se->fair_key;

	/*
	 * Take scheduling granularity into account - do not
	 * preempt the current task unless the best task has
	 * a larger than sched_granularity fairness advantage:
	 */
	if (__delta > niced_granularity(curr, granularity))
		resched_task(rq_of(cfs_rq)->curr);
}
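
/*
 * Worked example (illustrative, default settings): with the 1 ms
 * default wakeup granularity, a newly woken nice-0 task preempts
 * curr only when curr->fair_key exceeds the woken task's key by more
 * than 1,000,000 ns; any smaller fairness advantage just lets the
 * woken task wait its turn, avoiding over-eager context switches.
 */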
static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
{
	/*
	 * Any task has to be enqueued before it gets to execute on
	 * a CPU. So account for the time it spent waiting on the
	 * runqueue. (note, here we rely on pick_next_task() having
	 * done a put_prev_task_fair() shortly before this, which
	 * updated rq->fair_clock - used by update_stats_wait_end())
	 */
	update_stats_wait_end(cfs_rq, se, now);
	update_stats_curr_start(cfs_rq, se, now);
	set_cfs_rq_curr(cfs_rq, se);
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq, u64 now)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	set_next_entity(cfs_rq, se, now);

	return se;
}

static void
put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq, now);

	update_stats_curr_end(cfs_rq, prev, now);

	if (prev->on_rq)
		update_stats_wait_start(cfs_rq, prev, now);
	set_cfs_rq_curr(cfs_rq, NULL);
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	struct rq *rq = rq_of(cfs_rq);
	struct sched_entity *next;
	u64 now = __rq_clock(rq);

	/*
	 * Dequeue and enqueue the task to update its
	 * position within the tree:
	 */
	dequeue_entity(cfs_rq, curr, 0, now);
	enqueue_entity(cfs_rq, curr, 0, now);

	/*
	 * Reschedule if another task tops the current one.
	 */
	next = __pick_next_entity(cfs_rq);
	if (next == curr)
		return;

	__check_preempt_curr_fair(cfs_rq, next, curr, sysctl_sched_granularity);
}
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	/* A later patch will take group into account */
	return &cpu_rq(this_cpu)->cfs;
}
/* Iterate through all leaf cfs_rqs on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	if (curr->se.cfs_rq == p->se.cfs_rq)
		return 1;
	return 0;
}
#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	return 1;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup, now);
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void
dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep, now);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	u64 now = __rq_clock(rq);

	/*
	 * Dequeue and enqueue the task to update its
	 * position within the tree:
	 */
	dequeue_entity(cfs_rq, &p->se, 0, now);
	enqueue_entity(cfs_rq, &p->se, 0, now);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	unsigned long gran;

	if (unlikely(rt_prio(p->prio))) {
		update_curr(cfs_rq, rq_clock(rq));
		resched_task(curr);
		return;
	}

	gran = sysctl_sched_wakeup_granularity;
	/*
	 * Batch tasks prefer throughput over latency:
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		gran = sysctl_sched_batch_wakeup_granularity;

	if (is_same_group(curr, p))
		__check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
}

static struct task_struct *pick_next_task_fair(struct rq *rq, u64 now)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq, now);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}
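
/*
 * Note on the descent loop above: with CONFIG_FAIR_GROUP_SCHED the
 * picked entity may itself be a group, in which case group_cfs_rq()
 * returns that group's own runqueue and the pick repeats one level
 * down; for a plain task group_cfs_rq() is NULL, terminating the loop.
 */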
/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, u64 now)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se, now);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}
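
/*
 * Illustrative consumer of the iterator pair (a sketch of how a
 * balancer such as balance_tasks() walks a cfs_rq with the rq lock
 * held; the real consumer lives in sched.c):
 *
 *	struct task_struct *p = load_balance_start_fair(cfs_rq);
 *
 *	while (p) {
 *		... examine p, possibly pull it to this_rq ...
 *		p = load_balance_next_fair(cfs_rq);
 *	}
 *
 * Because rb_next() is taken before p is handed back, p itself may be
 * dequeued by the balancer without invalidating the iteration.
 */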
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = __pick_next_entity(cfs_rq);
	p = task_of(curr);

	return p->prio;
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned)
{
	struct cfs_rq *busy_cfs_rq;
	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;
		int this_best_prio, best_prio, best_prio_seen = 0;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight -
						 this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		this_best_prio = cfs_rq_best_prio(this_cfs_rq);
		best_prio = cfs_rq_best_prio(busy_cfs_rq);

		/*
		 * Enable handling of the case where there is more than one
		 * task with the best priority. If the current running task
		 * is one of those with prio==best_prio we know it won't be
		 * moved and therefore it's safe to override the skip (based
		 * on load) of any task we find with that prio.
		 */
		if (cfs_rq_curr(busy_cfs_rq) == &busiest->curr->se)
			best_prio_seen = 1;

		/* pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
				max_nr_move, maxload, sd, idle, all_pinned,
				&load_moved, this_best_prio, best_prio,
				best_prio_seen, &cfs_rq_iterator);

		total_nr_moved += nr_moved;
		max_nr_move -= nr_moved;
		rem_load_move -= load_moved;

		if (max_nr_move <= 0 || rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se;

	sched_info_queued(p);

	update_stats_enqueue(cfs_rq, se, now);
	/*
	 * Child runs first: we let it run before the parent
	 * until it reschedules once. We set up the key so that
	 * it will preempt the parent:
	 */
	p->se.fair_key = current->se.fair_key -
		niced_granularity(&rq->curr->se, sysctl_sched_granularity) - 1;
	/*
	 * The first wait is dominated by the child-runs-first logic,
	 * so do not credit it with that waiting time yet:
	 */
	if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
		p->se.wait_start_fair = 0;

	/*
	 * The statistical average of wait_runtime is about
	 * -granularity/2, so initialize the task with that:
	 */
	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
		p->se.wait_runtime = -(sysctl_sched_granularity / 2);

	__enqueue_entity(cfs_rq, se);
}
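
/*
 * Worked example of the child-runs-first key (illustrative, default
 * 2 ms granularity): a child forked by a nice-0 parent gets
 * fair_key = parent_key - 2,000,000 - 1, i.e. it sorts just far
 * enough left of the parent that __check_preempt_curr_fair()'s
 * "advantage > granularity" test succeeds and the parent is
 * rescheduled.
 */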
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se;
	u64 now = rq_clock(rq);
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		set_next_entity(cfs_rq, se, now);
	}
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.check_preempt_curr	= check_preempt_curr_fair,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

	.load_balance		= load_balance_fair,

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu, u64 now)
{
	struct rq *rq = cpu_rq(cpu);
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(rq, cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq, now);
}
#endif