sched_fair.c

  1. /*
  2. * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
  3. *
  4. * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  5. *
  6. * Interactivity improvements by Mike Galbraith
  7. * (C) 2007 Mike Galbraith <efault@gmx.de>
  8. *
  9. * Various enhancements by Dmitry Adamushko.
  10. * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  11. *
  12. * Group scheduling enhancements by Srivatsa Vaddagiri
  13. * Copyright IBM Corporation, 2007
  14. * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  15. *
  16. * Scaled math optimizations by Thomas Gleixner
  17. * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  18. *
  19. * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  20. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  21. */
  22. #include <linux/latencytop.h>
  23. /*
  24. * Targeted preemption latency for CPU-bound tasks:
  25. * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  26. *
  27. * NOTE: this latency value is not the same as the concept of
  28. * 'timeslice length' - timeslices in CFS are of variable length
  29. * and have no persistent notion like in traditional, time-slice
  30. * based scheduling concepts.
  31. *
  32. * (to see the precise effective timeslice length of your workload,
  33. * run vmstat and monitor the context-switches (cs) field)
  34. */
  35. unsigned int sysctl_sched_latency = 20000000ULL;
  36. /*
  37. * Minimal preemption granularity for CPU-bound tasks:
  38. * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  39. */
  40. unsigned int sysctl_sched_min_granularity = 4000000ULL;
  41. /*
  42. * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  43. */
  44. static unsigned int sched_nr_latency = 5;
  45. /*
  46. * After fork, the child runs first (default). If set to 0 then the
  47. * parent will (try to) run first.
  48. */
  49. const_debug unsigned int sysctl_sched_child_runs_first = 1;
  50. /*
  51. * sys_sched_yield() compat mode
  52. *
  53. * This option switches the aggressive yield implementation of the
  54. * old scheduler back on.
  55. */
  56. unsigned int __read_mostly sysctl_sched_compat_yield;
  57. /*
  58. * SCHED_OTHER wake-up granularity.
  59. * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  60. *
  61. * This option delays the preemption effects of decoupled workloads
  62. * and reduces their over-scheduling. Synchronous workloads will still
  63. * have immediate wakeup/sleep latencies.
  64. */
  65. unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
  66. const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  67. static const struct sched_class fair_sched_class;
  68. /**************************************************************
  69. * CFS operations on generic schedulable entities:
  70. */
  71. static inline struct task_struct *task_of(struct sched_entity *se)
  72. {
  73. return container_of(se, struct task_struct, se);
  74. }
  75. #ifdef CONFIG_FAIR_GROUP_SCHED
  76. /* cpu runqueue to which this cfs_rq is attached */
  77. static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  78. {
  79. return cfs_rq->rq;
  80. }
  81. /* An entity is a task if it doesn't "own" a runqueue */
  82. #define entity_is_task(se) (!se->my_q)
  83. /* Walk up scheduling entities hierarchy */
  84. #define for_each_sched_entity(se) \
  85. for (; se; se = se->parent)
  86. static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
  87. {
  88. return p->se.cfs_rq;
  89. }
  90. /* runqueue on which this entity is (to be) queued */
  91. static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
  92. {
  93. return se->cfs_rq;
  94. }
  95. /* runqueue "owned" by this group */
  96. static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  97. {
  98. return grp->my_q;
  99. }
  100. /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
  101. * another cpu ('this_cpu')
  102. */
  103. static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
  104. {
  105. return cfs_rq->tg->cfs_rq[this_cpu];
  106. }
  107. /* Iterate through all leaf cfs_rq's on a runqueue */
  108. #define for_each_leaf_cfs_rq(rq, cfs_rq) \
  109. list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
  110. /* Do the two (enqueued) entities belong to the same group? */
  111. static inline int
  112. is_same_group(struct sched_entity *se, struct sched_entity *pse)
  113. {
  114. if (se->cfs_rq == pse->cfs_rq)
  115. return 1;
  116. return 0;
  117. }
  118. static inline struct sched_entity *parent_entity(struct sched_entity *se)
  119. {
  120. return se->parent;
  121. }
  122. /* return depth at which a sched entity is present in the hierarchy */
  123. static inline int depth_se(struct sched_entity *se)
  124. {
  125. int depth = 0;
  126. for_each_sched_entity(se)
  127. depth++;
  128. return depth;
  129. }
  130. static void
  131. find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  132. {
  133. int se_depth, pse_depth;
  134. /*
  135. * A preemption test can be made between sibling entities that are in
  136. * the same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
  137. * both tasks until we find their ancestors that are siblings of a common
  138. * parent.
  139. */
  140. /* First walk up until both entities are at same depth */
  141. se_depth = depth_se(*se);
  142. pse_depth = depth_se(*pse);
  143. while (se_depth > pse_depth) {
  144. se_depth--;
  145. *se = parent_entity(*se);
  146. }
  147. while (pse_depth > se_depth) {
  148. pse_depth--;
  149. *pse = parent_entity(*pse);
  150. }
  151. while (!is_same_group(*se, *pse)) {
  152. *se = parent_entity(*se);
  153. *pse = parent_entity(*pse);
  154. }
  155. }
  156. #else /* CONFIG_FAIR_GROUP_SCHED */
  157. static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  158. {
  159. return container_of(cfs_rq, struct rq, cfs);
  160. }
  161. #define entity_is_task(se) 1
  162. #define for_each_sched_entity(se) \
  163. for (; se; se = NULL)
  164. static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
  165. {
  166. return &task_rq(p)->cfs;
  167. }
  168. static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
  169. {
  170. struct task_struct *p = task_of(se);
  171. struct rq *rq = task_rq(p);
  172. return &rq->cfs;
  173. }
  174. /* runqueue "owned" by this group */
  175. static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  176. {
  177. return NULL;
  178. }
  179. static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
  180. {
  181. return &cpu_rq(this_cpu)->cfs;
  182. }
  183. #define for_each_leaf_cfs_rq(rq, cfs_rq) \
  184. for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
  185. static inline int
  186. is_same_group(struct sched_entity *se, struct sched_entity *pse)
  187. {
  188. return 1;
  189. }
  190. static inline struct sched_entity *parent_entity(struct sched_entity *se)
  191. {
  192. return NULL;
  193. }
  194. static inline void
  195. find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  196. {
  197. }
  198. #endif /* CONFIG_FAIR_GROUP_SCHED */
  199. /**************************************************************
  200. * Scheduling class tree data structure manipulation methods:
  201. */
  202. static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
  203. {
  204. s64 delta = (s64)(vruntime - min_vruntime);
  205. if (delta > 0)
  206. min_vruntime = vruntime;
  207. return min_vruntime;
  208. }
  209. static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
  210. {
  211. s64 delta = (s64)(vruntime - min_vruntime);
  212. if (delta < 0)
  213. min_vruntime = vruntime;
  214. return min_vruntime;
  215. }
  216. static inline int entity_before(struct sched_entity *a,
  217. struct sched_entity *b)
  218. {
  219. return (s64)(a->vruntime - b->vruntime) < 0;
  220. }
  221. static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
  222. {
  223. return se->vruntime - cfs_rq->min_vruntime;
  224. }
  225. static void update_min_vruntime(struct cfs_rq *cfs_rq)
  226. {
  227. u64 vruntime = cfs_rq->min_vruntime;
  228. if (cfs_rq->curr)
  229. vruntime = cfs_rq->curr->vruntime;
  230. if (cfs_rq->rb_leftmost) {
  231. struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
  232. struct sched_entity,
  233. run_node);
  234. if (!cfs_rq->curr)
  235. vruntime = se->vruntime;
  236. else
  237. vruntime = min_vruntime(vruntime, se->vruntime);
  238. }
  239. cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
  240. }
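/*
 * In short: min_vruntime tracks the smallest vruntime among the running
 * entity and the leftmost queued entity, but max_vruntime() above only ever
 * lets it move forward. Keeping it monotonic keeps the entity_key() offsets
 * small and ensures a newly placed entity can never be credited with time it
 * did not run (see place_entity()).
 */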
  241. /*
  242. * Enqueue an entity into the rb-tree:
  243. */
  244. static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  245. {
  246. struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
  247. struct rb_node *parent = NULL;
  248. struct sched_entity *entry;
  249. s64 key = entity_key(cfs_rq, se);
  250. int leftmost = 1;
  251. /*
  252. * Find the right place in the rbtree:
  253. */
  254. while (*link) {
  255. parent = *link;
  256. entry = rb_entry(parent, struct sched_entity, run_node);
  257. /*
  258. * We don't care about collisions. Nodes with
  259. * the same key stay together.
  260. */
  261. if (key < entity_key(cfs_rq, entry)) {
  262. link = &parent->rb_left;
  263. } else {
  264. link = &parent->rb_right;
  265. leftmost = 0;
  266. }
  267. }
  268. /*
  269. * Maintain a cache of leftmost tree entries (it is frequently
  270. * used):
  271. */
  272. if (leftmost)
  273. cfs_rq->rb_leftmost = &se->run_node;
  274. rb_link_node(&se->run_node, parent, link);
  275. rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
  276. }
  277. static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  278. {
  279. if (cfs_rq->rb_leftmost == &se->run_node) {
  280. struct rb_node *next_node;
  281. next_node = rb_next(&se->run_node);
  282. cfs_rq->rb_leftmost = next_node;
  283. }
  284. rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
  285. }
  286. static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
  287. {
  288. struct rb_node *left = cfs_rq->rb_leftmost;
  289. if (!left)
  290. return NULL;
  291. return rb_entry(left, struct sched_entity, run_node);
  292. }
  293. static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  294. {
  295. struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
  296. if (!last)
  297. return NULL;
  298. return rb_entry(last, struct sched_entity, run_node);
  299. }
  300. /**************************************************************
  301. * Scheduling class statistics methods:
  302. */
  303. #ifdef CONFIG_SCHED_DEBUG
  304. int sched_nr_latency_handler(struct ctl_table *table, int write,
  305. struct file *filp, void __user *buffer, size_t *lenp,
  306. loff_t *ppos)
  307. {
  308. int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
  309. if (ret || !write)
  310. return ret;
  311. sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
  312. sysctl_sched_min_granularity);
  313. return 0;
  314. }
  315. #endif
  316. /*
  317. * delta /= w
  318. */
  319. static inline unsigned long
  320. calc_delta_fair(unsigned long delta, struct sched_entity *se)
  321. {
  322. if (unlikely(se->load.weight != NICE_0_LOAD))
  323. delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
  324. return delta;
  325. }
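/*
 * For illustration (assuming NICE_0_LOAD == 1024): a task with twice the
 * nice-0 weight (2048) has calc_delta_fair(10ms, se) ~= 10ms * 1024/2048
 * = 5ms, so its vruntime advances at half the wall-clock rate; a task with
 * half the weight (512) advances twice as fast (20ms of vruntime per 10ms
 * of wall time).
 */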
  326. /*
  327. * The idea is to set a period in which each task runs once.
  328. *
  329. * When there are more tasks than sched_nr_latency we have to stretch
  330. * this period because otherwise the slices get too small.
  331. *
  332. * p = (nr <= nl) ? l : l*nr/nl
  333. */
  334. static u64 __sched_period(unsigned long nr_running)
  335. {
  336. u64 period = sysctl_sched_latency;
  337. unsigned long nr_latency = sched_nr_latency;
  338. if (unlikely(nr_running > nr_latency)) {
  339. period = sysctl_sched_min_granularity;
  340. period *= nr_running;
  341. }
  342. return period;
  343. }
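/*
 * Worked example with the default values above (20ms latency, 4ms minimum
 * granularity, sched_nr_latency == 5), before the boot-time ilog(ncpus)
 * scaling: 3 runnable tasks keep the 20ms period (roughly 6.7ms each for
 * nice-0 tasks), while 8 runnable tasks stretch it to 8 * 4ms = 32ms so
 * that no slice drops below the 4ms floor.
 */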
  344. /*
  345. * We calculate the wall-time slice from the period by taking a part
  346. * proportional to the weight.
  347. *
  348. * s = p*P[w/rw]
  349. */
  350. static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  351. {
  352. u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
  353. for_each_sched_entity(se) {
  354. struct load_weight *load;
  355. struct load_weight lw;
  356. cfs_rq = cfs_rq_of(se);
  357. load = &cfs_rq->load;
  358. if (unlikely(!se->on_rq)) {
  359. lw = cfs_rq->load;
  360. update_load_add(&lw, se->load.weight);
  361. load = &lw;
  362. }
  363. slice = calc_delta_mine(slice, se->load.weight, load);
  364. }
  365. return slice;
  366. }
  367. /*
  368. * We calculate the vruntime slice of a to-be-inserted task
  369. *
  370. * vs = s/w
  371. */
  372. static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  373. {
  374. return calc_delta_fair(sched_slice(cfs_rq, se), se);
  375. }
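/*
 * Worked example (assuming NICE_0_LOAD == 1024): with a 20ms period and two
 * runnable tasks of weight 2048 and 1024 (rw = 3072), sched_slice() gives the
 * heavier task 20ms * 2048/3072 ~= 13.3ms and the nice-0 task ~= 6.7ms of
 * wall time. Converting with vs = s/w, both come out to ~6.7ms of vruntime,
 * which is the point: equal progress in virtual time.
 */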
  376. /*
  377. * Update the current task's runtime statistics. Skip current tasks that
  378. * are not in our scheduling class.
  379. */
  380. static inline void
  381. __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
  382. unsigned long delta_exec)
  383. {
  384. unsigned long delta_exec_weighted;
  385. schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
  386. curr->sum_exec_runtime += delta_exec;
  387. schedstat_add(cfs_rq, exec_clock, delta_exec);
  388. delta_exec_weighted = calc_delta_fair(delta_exec, curr);
  389. curr->vruntime += delta_exec_weighted;
  390. update_min_vruntime(cfs_rq);
  391. }
  392. static void update_curr(struct cfs_rq *cfs_rq)
  393. {
  394. struct sched_entity *curr = cfs_rq->curr;
  395. u64 now = rq_of(cfs_rq)->clock;
  396. unsigned long delta_exec;
  397. if (unlikely(!curr))
  398. return;
  399. /*
  400. * Get the amount of time the current task was running
  401. * since the last time we changed load (this cannot
  402. * overflow on 32 bits):
  403. */
  404. delta_exec = (unsigned long)(now - curr->exec_start);
  405. if (!delta_exec)
  406. return;
  407. __update_curr(cfs_rq, curr, delta_exec);
  408. curr->exec_start = now;
  409. if (entity_is_task(curr)) {
  410. struct task_struct *curtask = task_of(curr);
  411. cpuacct_charge(curtask, delta_exec);
  412. account_group_exec_runtime(curtask, delta_exec);
  413. }
  414. }
  415. static inline void
  416. update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  417. {
  418. schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
  419. }
  420. /*
  421. * Task is being enqueued - update stats:
  422. */
  423. static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  424. {
  425. /*
  426. * Are we enqueueing a waiting task? (for current tasks
  427. * a dequeue/enqueue event is a NOP)
  428. */
  429. if (se != cfs_rq->curr)
  430. update_stats_wait_start(cfs_rq, se);
  431. }
  432. static void
  433. update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  434. {
  435. schedstat_set(se->wait_max, max(se->wait_max,
  436. rq_of(cfs_rq)->clock - se->wait_start));
  437. schedstat_set(se->wait_count, se->wait_count + 1);
  438. schedstat_set(se->wait_sum, se->wait_sum +
  439. rq_of(cfs_rq)->clock - se->wait_start);
  440. schedstat_set(se->wait_start, 0);
  441. }
  442. static inline void
  443. update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  444. {
  445. /*
  446. * Mark the end of the wait period if dequeueing a
  447. * waiting task:
  448. */
  449. if (se != cfs_rq->curr)
  450. update_stats_wait_end(cfs_rq, se);
  451. }
  452. /*
  453. * We are picking a new current task - update its stats:
  454. */
  455. static inline void
  456. update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  457. {
  458. /*
  459. * We are starting a new run period:
  460. */
  461. se->exec_start = rq_of(cfs_rq)->clock;
  462. }
  463. /**************************************************
  464. * Scheduling class queueing methods:
  465. */
  466. #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
  467. static void
  468. add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
  469. {
  470. cfs_rq->task_weight += weight;
  471. }
  472. #else
  473. static inline void
  474. add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
  475. {
  476. }
  477. #endif
  478. static void
  479. account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  480. {
  481. update_load_add(&cfs_rq->load, se->load.weight);
  482. if (!parent_entity(se))
  483. inc_cpu_load(rq_of(cfs_rq), se->load.weight);
  484. if (entity_is_task(se)) {
  485. add_cfs_task_weight(cfs_rq, se->load.weight);
  486. list_add(&se->group_node, &cfs_rq->tasks);
  487. }
  488. cfs_rq->nr_running++;
  489. se->on_rq = 1;
  490. }
  491. static void
  492. account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  493. {
  494. update_load_sub(&cfs_rq->load, se->load.weight);
  495. if (!parent_entity(se))
  496. dec_cpu_load(rq_of(cfs_rq), se->load.weight);
  497. if (entity_is_task(se)) {
  498. add_cfs_task_weight(cfs_rq, -se->load.weight);
  499. list_del_init(&se->group_node);
  500. }
  501. cfs_rq->nr_running--;
  502. se->on_rq = 0;
  503. }
  504. static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
  505. {
  506. #ifdef CONFIG_SCHEDSTATS
  507. if (se->sleep_start) {
  508. u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
  509. struct task_struct *tsk = task_of(se);
  510. if ((s64)delta < 0)
  511. delta = 0;
  512. if (unlikely(delta > se->sleep_max))
  513. se->sleep_max = delta;
  514. se->sleep_start = 0;
  515. se->sum_sleep_runtime += delta;
  516. account_scheduler_latency(tsk, delta >> 10, 1);
  517. }
  518. if (se->block_start) {
  519. u64 delta = rq_of(cfs_rq)->clock - se->block_start;
  520. struct task_struct *tsk = task_of(se);
  521. if ((s64)delta < 0)
  522. delta = 0;
  523. if (unlikely(delta > se->block_max))
  524. se->block_max = delta;
  525. se->block_start = 0;
  526. se->sum_sleep_runtime += delta;
  527. /*
  528. * Blocking time is in units of nanosecs, so shift by 20 to
  529. * get a milliseconds-range estimation of the amount of
  530. * time that the task spent sleeping:
  531. */
  532. if (unlikely(prof_on == SLEEP_PROFILING)) {
  533. profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
  534. delta >> 20);
  535. }
  536. account_scheduler_latency(tsk, delta >> 10, 0);
  537. }
  538. #endif
  539. }
  540. static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
  541. {
  542. #ifdef CONFIG_SCHED_DEBUG
  543. s64 d = se->vruntime - cfs_rq->min_vruntime;
  544. if (d < 0)
  545. d = -d;
  546. if (d > 3*sysctl_sched_latency)
  547. schedstat_inc(cfs_rq, nr_spread_over);
  548. #endif
  549. }
  550. static void
  551. place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
  552. {
  553. u64 vruntime = cfs_rq->min_vruntime;
  554. /*
  555. * The 'current' period is already promised to the current tasks;
  556. * however, the extra weight of the new task will slow them down a
  557. * little. Place the new task so that it fits in the slot that
  558. * stays open at the end.
  559. */
  560. if (initial && sched_feat(START_DEBIT))
  561. vruntime += sched_vslice(cfs_rq, se);
  562. if (!initial) {
  563. /* Sleeps up to a single latency don't count. */
  564. if (sched_feat(NEW_FAIR_SLEEPERS)) {
  565. unsigned long thresh = sysctl_sched_latency;
  566. /*
  567. * Convert the sleeper threshold into virtual time.
  568. * SCHED_IDLE is a special sub-class. We care about
  569. * fairness only relative to other SCHED_IDLE tasks,
  570. * all of which have the same weight.
  571. */
  572. if (sched_feat(NORMALIZED_SLEEPER) &&
  573. (!entity_is_task(se) ||
  574. task_of(se)->policy != SCHED_IDLE))
  575. thresh = calc_delta_fair(thresh, se);
  576. vruntime -= thresh;
  577. }
  578. /* ensure we never gain time by being placed backwards. */
  579. vruntime = max_vruntime(se->vruntime, vruntime);
  580. }
  581. se->vruntime = vruntime;
  582. }
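/*
 * For example: with min_vruntime at 100ms (in vruntime units), START_DEBIT
 * places a forked task one vslice after 100ms, so it cannot immediately
 * preempt its siblings. A waking task gets up to one sysctl_sched_latency of
 * credit: the candidate position is 100ms - 20ms = 80ms, but max_vruntime()
 * means a task whose own vruntime is already 95ms keeps 95ms - only a task
 * that slept long enough to fall behind 80ms is bumped up to 80ms.
 * (Numbers are illustrative and ignore the NORMALIZED_SLEEPER scaling.)
 */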
  583. static void
  584. enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
  585. {
  586. /*
  587. * Update run-time statistics of the 'current'.
  588. */
  589. update_curr(cfs_rq);
  590. account_entity_enqueue(cfs_rq, se);
  591. if (wakeup) {
  592. place_entity(cfs_rq, se, 0);
  593. enqueue_sleeper(cfs_rq, se);
  594. }
  595. update_stats_enqueue(cfs_rq, se);
  596. check_spread(cfs_rq, se);
  597. if (se != cfs_rq->curr)
  598. __enqueue_entity(cfs_rq, se);
  599. }
  600. static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
  601. {
  602. if (cfs_rq->last == se)
  603. cfs_rq->last = NULL;
  604. if (cfs_rq->next == se)
  605. cfs_rq->next = NULL;
  606. }
  607. static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
  608. {
  609. for_each_sched_entity(se)
  610. __clear_buddies(cfs_rq_of(se), se);
  611. }
  612. static void
  613. dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
  614. {
  615. /*
  616. * Update run-time statistics of the 'current'.
  617. */
  618. update_curr(cfs_rq);
  619. update_stats_dequeue(cfs_rq, se);
  620. if (sleep) {
  621. #ifdef CONFIG_SCHEDSTATS
  622. if (entity_is_task(se)) {
  623. struct task_struct *tsk = task_of(se);
  624. if (tsk->state & TASK_INTERRUPTIBLE)
  625. se->sleep_start = rq_of(cfs_rq)->clock;
  626. if (tsk->state & TASK_UNINTERRUPTIBLE)
  627. se->block_start = rq_of(cfs_rq)->clock;
  628. }
  629. #endif
  630. }
  631. clear_buddies(cfs_rq, se);
  632. if (se != cfs_rq->curr)
  633. __dequeue_entity(cfs_rq, se);
  634. account_entity_dequeue(cfs_rq, se);
  635. update_min_vruntime(cfs_rq);
  636. }
  637. /*
  638. * Preempt the current task with a newly woken task if needed:
  639. */
  640. static void
  641. check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  642. {
  643. unsigned long ideal_runtime, delta_exec;
  644. ideal_runtime = sched_slice(cfs_rq, curr);
  645. delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  646. if (delta_exec > ideal_runtime) {
  647. resched_task(rq_of(cfs_rq)->curr);
  648. /*
  649. * The current task ran long enough, ensure it doesn't get
  650. * re-elected due to buddy favours.
  651. */
  652. clear_buddies(cfs_rq, curr);
  653. }
  654. }
  655. static void
  656. set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  657. {
  658. /* 'current' is not kept within the tree. */
  659. if (se->on_rq) {
  660. /*
  661. * Any task has to be enqueued before it gets to execute on
  662. * a CPU. So account for the time it spent waiting on the
  663. * runqueue.
  664. */
  665. update_stats_wait_end(cfs_rq, se);
  666. __dequeue_entity(cfs_rq, se);
  667. }
  668. update_stats_curr_start(cfs_rq, se);
  669. cfs_rq->curr = se;
  670. #ifdef CONFIG_SCHEDSTATS
  671. /*
  672. * Track our maximum slice length, if the CPU's load is at
  673. * least twice that of our own weight (i.e. don't track it
  674. * when there are only lesser-weight tasks around):
  675. */
  676. if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
  677. se->slice_max = max(se->slice_max,
  678. se->sum_exec_runtime - se->prev_sum_exec_runtime);
  679. }
  680. #endif
  681. se->prev_sum_exec_runtime = se->sum_exec_runtime;
  682. }
  683. static int
  684. wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
  685. static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
  686. {
  687. struct sched_entity *se = __pick_next_entity(cfs_rq);
  688. if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
  689. return cfs_rq->next;
  690. if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
  691. return cfs_rq->last;
  692. return se;
  693. }
  694. static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
  695. {
  696. /*
  697. * If still on the runqueue then deactivate_task()
  698. * was not called and update_curr() has to be done:
  699. */
  700. if (prev->on_rq)
  701. update_curr(cfs_rq);
  702. check_spread(cfs_rq, prev);
  703. if (prev->on_rq) {
  704. update_stats_wait_start(cfs_rq, prev);
  705. /* Put 'current' back into the tree. */
  706. __enqueue_entity(cfs_rq, prev);
  707. }
  708. cfs_rq->curr = NULL;
  709. }
  710. static void
  711. entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  712. {
  713. /*
  714. * Update run-time statistics of the 'current'.
  715. */
  716. update_curr(cfs_rq);
  717. #ifdef CONFIG_SCHED_HRTICK
  718. /*
  719. * queued ticks are scheduled to match the slice, so don't bother
  720. * validating it and just reschedule.
  721. */
  722. if (queued) {
  723. resched_task(rq_of(cfs_rq)->curr);
  724. return;
  725. }
  726. /*
  727. * don't let the period tick interfere with the hrtick preemption
  728. */
  729. if (!sched_feat(DOUBLE_TICK) &&
  730. hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
  731. return;
  732. #endif
  733. if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
  734. check_preempt_tick(cfs_rq, curr);
  735. }
  736. /**************************************************
  737. * CFS operations on tasks:
  738. */
  739. #ifdef CONFIG_SCHED_HRTICK
  740. static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
  741. {
  742. struct sched_entity *se = &p->se;
  743. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  744. WARN_ON(task_rq(p) != rq);
  745. if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
  746. u64 slice = sched_slice(cfs_rq, se);
  747. u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
  748. s64 delta = slice - ran;
  749. if (delta < 0) {
  750. if (rq->curr == p)
  751. resched_task(p);
  752. return;
  753. }
  754. /*
  755. * Don't schedule slices shorter than 10000ns, that just
  756. * doesn't make sense. Rely on vruntime for fairness.
  757. */
  758. if (rq->curr != p)
  759. delta = max_t(s64, 10000LL, delta);
  760. hrtick_start(rq, delta);
  761. }
  762. }
  763. /*
  764. * called from enqueue/dequeue and updates the hrtick when the
  765. * current task is from our class and nr_running is low enough
  766. * to matter.
  767. */
  768. static void hrtick_update(struct rq *rq)
  769. {
  770. struct task_struct *curr = rq->curr;
  771. if (curr->sched_class != &fair_sched_class)
  772. return;
  773. if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
  774. hrtick_start_fair(rq, curr);
  775. }
  776. #else /* !CONFIG_SCHED_HRTICK */
  777. static inline void
  778. hrtick_start_fair(struct rq *rq, struct task_struct *p)
  779. {
  780. }
  781. static inline void hrtick_update(struct rq *rq)
  782. {
  783. }
  784. #endif
  785. /*
  786. * The enqueue_task method is called before nr_running is
  787. * increased. Here we update the fair scheduling stats and
  788. * then put the task into the rbtree:
  789. */
  790. static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
  791. {
  792. struct cfs_rq *cfs_rq;
  793. struct sched_entity *se = &p->se;
  794. for_each_sched_entity(se) {
  795. if (se->on_rq)
  796. break;
  797. cfs_rq = cfs_rq_of(se);
  798. enqueue_entity(cfs_rq, se, wakeup);
  799. wakeup = 1;
  800. }
  801. hrtick_update(rq);
  802. }
  803. /*
  804. * The dequeue_task method is called before nr_running is
  805. * decreased. We remove the task from the rbtree and
  806. * update the fair scheduling stats:
  807. */
  808. static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  809. {
  810. struct cfs_rq *cfs_rq;
  811. struct sched_entity *se = &p->se;
  812. for_each_sched_entity(se) {
  813. cfs_rq = cfs_rq_of(se);
  814. dequeue_entity(cfs_rq, se, sleep);
  815. /* Don't dequeue parent if it has other entities besides us */
  816. if (cfs_rq->load.weight)
  817. break;
  818. sleep = 1;
  819. }
  820. hrtick_update(rq);
  821. }
  822. /*
  823. * sched_yield() support is very simple - we dequeue and enqueue.
  824. *
  825. * If compat_yield is turned on then we requeue to the end of the tree.
  826. */
  827. static void yield_task_fair(struct rq *rq)
  828. {
  829. struct task_struct *curr = rq->curr;
  830. struct cfs_rq *cfs_rq = task_cfs_rq(curr);
  831. struct sched_entity *rightmost, *se = &curr->se;
  832. /*
  833. * Are we the only task in the tree?
  834. */
  835. if (unlikely(cfs_rq->nr_running == 1))
  836. return;
  837. clear_buddies(cfs_rq, se);
  838. if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
  839. update_rq_clock(rq);
  840. /*
  841. * Update run-time statistics of the 'current'.
  842. */
  843. update_curr(cfs_rq);
  844. return;
  845. }
  846. /*
  847. * Find the rightmost entry in the rbtree:
  848. */
  849. rightmost = __pick_last_entity(cfs_rq);
  850. /*
  851. * Already in the rightmost position?
  852. */
  853. if (unlikely(!rightmost || entity_before(rightmost, se)))
  854. return;
  855. /*
  856. * Minimally necessary key value to be last in the tree:
  857. * Upon rescheduling, sched_class::put_prev_task() will place
  858. * 'current' within the tree based on its new key value.
  859. */
  860. se->vruntime = rightmost->vruntime + 1;
  861. }
  862. /*
  863. * wake_idle() will wake a task on an idle cpu if task->cpu is
  864. * not idle and an idle cpu is available. The span of cpus to
  865. * search starts with cpus closest then further out as needed,
  866. * so we always favor a closer, idle cpu.
  867. * Domains may include CPUs that are not usable for migration,
  868. * hence we need to mask them out (cpu_active_mask)
  869. *
  870. * Returns the CPU we should wake onto.
  871. */
  872. #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
  873. static int wake_idle(int cpu, struct task_struct *p)
  874. {
  875. struct sched_domain *sd;
  876. int i;
  877. unsigned int chosen_wakeup_cpu;
  878. int this_cpu;
  879. /*
  880. * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
  881. * are idle and this is not a kernel thread and this task's affinity
  882. * allows it to be moved to preferred cpu, then just move!
  883. */
  884. this_cpu = smp_processor_id();
  885. chosen_wakeup_cpu =
  886. cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
  887. if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
  888. idle_cpu(cpu) && idle_cpu(this_cpu) &&
  889. p->mm && !(p->flags & PF_KTHREAD) &&
  890. cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
  891. return chosen_wakeup_cpu;
  892. /*
  893. * If it is idle, then it is the best cpu to run this task.
  894. *
  895. * This cpu is also the best, if it has more than one task already.
  896. * Siblings must also be busy (in most cases) as they didn't already
  897. * pick up the extra load from this cpu, hence we need not check the
  898. * sibling runqueue info. This avoids the checks and cache-miss
  899. * penalties associated with that.
  900. */
  901. if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
  902. return cpu;
  903. for_each_domain(cpu, sd) {
  904. if ((sd->flags & SD_WAKE_IDLE)
  905. || ((sd->flags & SD_WAKE_IDLE_FAR)
  906. && !task_hot(p, task_rq(p)->clock, sd))) {
  907. for_each_cpu_and(i, sched_domain_span(sd),
  908. &p->cpus_allowed) {
  909. if (cpu_active(i) && idle_cpu(i)) {
  910. if (i != task_cpu(p)) {
  911. schedstat_inc(p,
  912. se.nr_wakeups_idle);
  913. }
  914. return i;
  915. }
  916. }
  917. } else {
  918. break;
  919. }
  920. }
  921. return cpu;
  922. }
  923. #else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
  924. static inline int wake_idle(int cpu, struct task_struct *p)
  925. {
  926. return cpu;
  927. }
  928. #endif
  929. #ifdef CONFIG_SMP
  930. #ifdef CONFIG_FAIR_GROUP_SCHED
  931. /*
  932. * effective_load() calculates the load change as seen from the root_task_group
  933. *
  934. * Adding load to a group doesn't make a group heavier, but can cause movement
  935. * of group shares between cpus. Assuming the shares were perfectly aligned one
  936. * can calculate the shift in shares.
  937. *
  938. * The problem is that perfectly aligning the shares is rather expensive, hence
  939. * we try to avoid doing that too often - see update_shares(), which ratelimits
  940. * this change.
  941. *
  942. * We compensate this by not only taking the current delta into account, but
  943. * also considering the delta between when the shares were last adjusted and
  944. * now.
  945. *
  946. * We still saw a performance dip; some tracing showed us that when
  947. * balancing between cgroup:/ and cgroup:/foo the number of affine
  948. * wakeups increased significantly. Therefore try to bias the error in
  949. * the direction of failing the affine wakeup.
  950. *
  951. */
  952. static long effective_load(struct task_group *tg, int cpu,
  953. long wl, long wg)
  954. {
  955. struct sched_entity *se = tg->se[cpu];
  956. if (!tg->parent)
  957. return wl;
  958. /*
  959. * By not taking the decrease of shares on the other cpu into
  960. * account our error leans towards reducing the affine wakeups.
  961. */
  962. if (!wl && sched_feat(ASYM_EFF_LOAD))
  963. return wl;
  964. for_each_sched_entity(se) {
  965. long S, rw, s, a, b;
  966. long more_w;
  967. /*
  968. * Instead of using this increment, also add the difference
  969. * between when the shares were last updated and now.
  970. */
  971. more_w = se->my_q->load.weight - se->my_q->rq_weight;
  972. wl += more_w;
  973. wg += more_w;
  974. S = se->my_q->tg->shares;
  975. s = se->my_q->shares;
  976. rw = se->my_q->rq_weight;
  977. a = S*(rw + wl);
  978. b = S*rw + s*wg;
  979. wl = s*(a-b);
  980. if (likely(b))
  981. wl /= b;
  982. /*
  983. * Assume the group is already running and will
  984. * thus already be accounted for in the weight.
  985. *
  986. * That is, moving shares between CPUs does not
  987. * alter the group weight.
  988. */
  989. wg = 0;
  990. }
  991. return wl;
  992. }
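/*
 * Rough numeric example (illustrative, ignoring the more_w correction): a
 * group with tg->shares S = 1024 has rq_weight rw = 1024 on this cpu out of
 * 2048 group-wide, so this cpu holds s = 512 of the shares. Waking a nice-0
 * task here (wl = wg = 1024) gives a = S*(rw+wl) = 2097152 and
 * b = S*rw + s*wg = 1572864, hence wl = s*(a-b)/b ~= 171: the root sees the
 * wakeup add only ~171 units of load instead of the raw 1024, because the
 * group's shares are merely redistributed between cpus. The loop then sets
 * wg = 0 and propagates this smaller wl up the hierarchy.
 */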
  993. #else
  994. static inline unsigned long effective_load(struct task_group *tg, int cpu,
  995. unsigned long wl, unsigned long wg)
  996. {
  997. return wl;
  998. }
  999. #endif
  1000. static int
  1001. wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
  1002. struct task_struct *p, int prev_cpu, int this_cpu, int sync,
  1003. int idx, unsigned long load, unsigned long this_load,
  1004. unsigned int imbalance)
  1005. {
  1006. struct task_struct *curr = this_rq->curr;
  1007. struct task_group *tg;
  1008. unsigned long tl = this_load;
  1009. unsigned long tl_per_task;
  1010. unsigned long weight;
  1011. int balanced;
  1012. if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
  1013. return 0;
  1014. if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
  1015. p->se.avg_overlap > sysctl_sched_migration_cost))
  1016. sync = 0;
  1017. /*
  1018. * If sync wakeup then subtract the (maximum possible)
  1019. * effect of the currently running task from the load
  1020. * of the current CPU:
  1021. */
  1022. if (sync) {
  1023. tg = task_group(current);
  1024. weight = current->se.load.weight;
  1025. tl += effective_load(tg, this_cpu, -weight, -weight);
  1026. load += effective_load(tg, prev_cpu, 0, -weight);
  1027. }
  1028. tg = task_group(p);
  1029. weight = p->se.load.weight;
  1030. balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
  1031. imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
  1032. /*
  1033. * If the currently running task will sleep within
  1034. * a reasonable amount of time then attract this newly
  1035. * woken task:
  1036. */
  1037. if (sync && balanced)
  1038. return 1;
  1039. schedstat_inc(p, se.nr_wakeups_affine_attempts);
  1040. tl_per_task = cpu_avg_load_per_task(this_cpu);
  1041. if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
  1042. tl_per_task)) {
  1043. /*
  1044. * This domain has SD_WAKE_AFFINE and
  1045. * p is cache cold in this domain, and
  1046. * there is no bad imbalance.
  1047. */
  1048. schedstat_inc(this_sd, ttwu_move_affine);
  1049. schedstat_inc(p, se.nr_wakeups_affine);
  1050. return 1;
  1051. }
  1052. return 0;
  1053. }
  1054. static int select_task_rq_fair(struct task_struct *p, int sync)
  1055. {
  1056. struct sched_domain *sd, *this_sd = NULL;
  1057. int prev_cpu, this_cpu, new_cpu;
  1058. unsigned long load, this_load;
  1059. struct rq *this_rq;
  1060. unsigned int imbalance;
  1061. int idx;
  1062. prev_cpu = task_cpu(p);
  1063. this_cpu = smp_processor_id();
  1064. this_rq = cpu_rq(this_cpu);
  1065. new_cpu = prev_cpu;
  1066. if (prev_cpu == this_cpu)
  1067. goto out;
  1068. /*
  1069. * 'this_sd' is the first domain that both
  1070. * this_cpu and prev_cpu are present in:
  1071. */
  1072. for_each_domain(this_cpu, sd) {
  1073. if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
  1074. this_sd = sd;
  1075. break;
  1076. }
  1077. }
  1078. if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
  1079. goto out;
  1080. /*
  1081. * Check for affine wakeup and passive balancing possibilities.
  1082. */
  1083. if (!this_sd)
  1084. goto out;
  1085. idx = this_sd->wake_idx;
  1086. imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
  1087. load = source_load(prev_cpu, idx);
  1088. this_load = target_load(this_cpu, idx);
  1089. if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
  1090. load, this_load, imbalance))
  1091. return this_cpu;
  1092. /*
  1093. * Start passive balancing when half the imbalance_pct
  1094. * limit is reached.
  1095. */
  1096. if (this_sd->flags & SD_WAKE_BALANCE) {
  1097. if (imbalance*this_load <= 100*load) {
  1098. schedstat_inc(this_sd, ttwu_move_balance);
  1099. schedstat_inc(p, se.nr_wakeups_passive);
  1100. return this_cpu;
  1101. }
  1102. }
  1103. out:
  1104. return wake_idle(new_cpu, p);
  1105. }
  1106. #endif /* CONFIG_SMP */
  1107. /*
  1108. * Adaptive granularity
  1109. *
  1110. * se->avg_wakeup gives the average time a task runs until it does a wakeup,
  1111. * with the limit of wakeup_gran -- when it never does a wakeup.
  1112. *
  1113. * So the smaller avg_wakeup is the faster we want this task to preempt,
  1114. * but we don't want to treat the preemptee unfairly and therefore allow it
  1115. * to run for at least the amount of time we'd like to run.
  1116. *
  1117. * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
  1118. *
  1119. * NOTE: we use *nr_running to scale with load; this nicely matches
  1120. * the degrading latency under load.
  1121. */
  1122. static unsigned long
  1123. adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
  1124. {
  1125. u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  1126. u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
  1127. u64 gran = 0;
  1128. if (this_run < expected_wakeup)
  1129. gran = expected_wakeup - this_run;
  1130. return min_t(s64, gran, sysctl_sched_wakeup_granularity);
  1131. }
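/*
 * Example (illustrative): if se->avg_wakeup is 1ms and the cfs_rq has two
 * runnable tasks, expected_wakeup = 2 * 1ms * 2 = 4ms. If curr has run 1.5ms
 * since it was picked, gran = 4ms - 1.5ms = 2.5ms; once curr has run 4ms or
 * more, gran drops to 0 and the wakee may preempt as soon as it is behind in
 * vruntime at all. The result is always clamped to
 * sysctl_sched_wakeup_granularity (5ms by default, before ilog scaling).
 */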
  1132. static unsigned long
  1133. wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
  1134. {
  1135. unsigned long gran = sysctl_sched_wakeup_granularity;
  1136. if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
  1137. gran = adaptive_gran(curr, se);
  1138. /*
  1139. * Since it's curr that is running now, convert the gran from real-time
  1140. * to virtual-time in its units.
  1141. */
  1142. if (sched_feat(ASYM_GRAN)) {
  1143. /*
  1144. * By using 'se' instead of 'curr' we penalize light tasks, so
  1145. * they get preempted easier. That is, if 'se' < 'curr' then
  1146. * the resulting gran will be larger, therefore penalizing the
  1147. * lighter, if otoh 'se' > 'curr' then the resulting gran will
  1148. * be smaller, again penalizing the lighter task.
  1149. *
  1150. * This is especially important for buddies when the leftmost
  1151. * task is higher priority than the buddy.
  1152. */
  1153. if (unlikely(se->load.weight != NICE_0_LOAD))
  1154. gran = calc_delta_fair(gran, se);
  1155. } else {
  1156. if (unlikely(curr->load.weight != NICE_0_LOAD))
  1157. gran = calc_delta_fair(gran, curr);
  1158. }
  1159. return gran;
  1160. }
  1161. /*
  1162. * Should 'se' preempt 'curr'?
  1163. *
  1164. * |s1
  1165. * |s2
  1166. * |s3
  1167. * g
  1168. * |<--->|c
  1169. *
  1170. * w(c, s1) = -1
  1171. * w(c, s2) = 0
  1172. * w(c, s3) = 1
  1173. *
  1174. */
  1175. static int
  1176. wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
  1177. {
  1178. s64 gran, vdiff = curr->vruntime - se->vruntime;
  1179. if (vdiff <= 0)
  1180. return -1;
  1181. gran = wakeup_gran(curr, se);
  1182. if (vdiff > gran)
  1183. return 1;
  1184. return 0;
  1185. }
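/*
 * Concretely: vdiff = curr->vruntime - se->vruntime. If se's vruntime is not
 * smaller than curr's (vdiff <= 0), the answer is -1: never preempt. If se's
 * vruntime is smaller than curr's by more than one (virtual-time) wakeup
 * granularity, the answer is 1: se is owed enough runtime to preempt. In
 * between the answer is 0, which pick_next_entity() treats as "close enough"
 * for the next/last buddies. E.g. with a 5ms gran, a wakee whose vruntime is
 * 3ms smaller than curr's returns 0; one 8ms smaller returns 1.
 */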
  1186. static void set_last_buddy(struct sched_entity *se)
  1187. {
  1188. if (likely(task_of(se)->policy != SCHED_IDLE)) {
  1189. for_each_sched_entity(se)
  1190. cfs_rq_of(se)->last = se;
  1191. }
  1192. }
  1193. static void set_next_buddy(struct sched_entity *se)
  1194. {
  1195. if (likely(task_of(se)->policy != SCHED_IDLE)) {
  1196. for_each_sched_entity(se)
  1197. cfs_rq_of(se)->next = se;
  1198. }
  1199. }
  1200. /*
  1201. * Preempt the current task with a newly woken task if needed:
  1202. */
  1203. static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
  1204. {
  1205. struct task_struct *curr = rq->curr;
  1206. struct sched_entity *se = &curr->se, *pse = &p->se;
  1207. struct cfs_rq *cfs_rq = task_cfs_rq(curr);
  1208. update_curr(cfs_rq);
  1209. if (unlikely(rt_prio(p->prio))) {
  1210. resched_task(curr);
  1211. return;
  1212. }
  1213. if (unlikely(p->sched_class != &fair_sched_class))
  1214. return;
  1215. if (unlikely(se == pse))
  1216. return;
  1217. /*
  1218. * Only set the backward buddy when the current task is still on the
  1219. * rq. This can happen when a wakeup gets interleaved with schedule on
  1220. * the ->pre_schedule() or idle_balance() point, either of which can
  1221. * drop the rq lock.
  1222. *
  1223. * Also, during early boot the idle thread is in the fair class; for
  1224. * obvious reasons it's a bad idea to schedule back to the idle thread.
  1225. */
  1226. if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
  1227. set_last_buddy(se);
  1228. set_next_buddy(pse);
  1229. /*
  1230. * We can come here with TIF_NEED_RESCHED already set from new task
  1231. * wake up path.
  1232. */
  1233. if (test_tsk_need_resched(curr))
  1234. return;
  1235. /*
  1236. * Batch and idle tasks do not preempt (their preemption is driven by
  1237. * the tick):
  1238. */
  1239. if (unlikely(p->policy != SCHED_NORMAL))
  1240. return;
  1241. /* Idle tasks are by definition preempted by everybody. */
  1242. if (unlikely(curr->policy == SCHED_IDLE)) {
  1243. resched_task(curr);
  1244. return;
  1245. }
  1246. if (!sched_feat(WAKEUP_PREEMPT))
  1247. return;
  1248. if (sched_feat(WAKEUP_OVERLAP) && (sync ||
  1249. (se->avg_overlap < sysctl_sched_migration_cost &&
  1250. pse->avg_overlap < sysctl_sched_migration_cost))) {
  1251. resched_task(curr);
  1252. return;
  1253. }
  1254. find_matching_se(&se, &pse);
  1255. BUG_ON(!pse);
  1256. if (wakeup_preempt_entity(se, pse) == 1)
  1257. resched_task(curr);
  1258. }
  1259. static struct task_struct *pick_next_task_fair(struct rq *rq)
  1260. {
  1261. struct task_struct *p;
  1262. struct cfs_rq *cfs_rq = &rq->cfs;
  1263. struct sched_entity *se;
  1264. if (unlikely(!cfs_rq->nr_running))
  1265. return NULL;
  1266. do {
  1267. se = pick_next_entity(cfs_rq);
  1268. /*
  1269. * If se was a buddy, clear it so that it will have to earn
  1270. * the favour again.
  1271. */
  1272. __clear_buddies(cfs_rq, se);
  1273. set_next_entity(cfs_rq, se);
  1274. cfs_rq = group_cfs_rq(se);
  1275. } while (cfs_rq);
  1276. p = task_of(se);
  1277. hrtick_start_fair(rq, p);
  1278. return p;
  1279. }
  1280. /*
  1281. * Account for a descheduled task:
  1282. */
  1283. static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  1284. {
  1285. struct sched_entity *se = &prev->se;
  1286. struct cfs_rq *cfs_rq;
  1287. for_each_sched_entity(se) {
  1288. cfs_rq = cfs_rq_of(se);
  1289. put_prev_entity(cfs_rq, se);
  1290. }
  1291. }
  1292. #ifdef CONFIG_SMP
  1293. /**************************************************
  1294. * Fair scheduling class load-balancing methods:
  1295. */
  1296. /*
  1297. * Load-balancing iterator. Note: while the runqueue stays locked
  1298. * during the whole iteration, the current task might be
  1299. * dequeued so the iterator has to be dequeue-safe. Here we
  1300. * achieve that by always pre-iterating before returning
  1301. * the current task:
  1302. */
  1303. static struct task_struct *
  1304. __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
  1305. {
  1306. struct task_struct *p = NULL;
  1307. struct sched_entity *se;
  1308. if (next == &cfs_rq->tasks)
  1309. return NULL;
  1310. se = list_entry(next, struct sched_entity, group_node);
  1311. p = task_of(se);
  1312. cfs_rq->balance_iterator = next->next;
  1313. return p;
  1314. }
  1315. static struct task_struct *load_balance_start_fair(void *arg)
  1316. {
  1317. struct cfs_rq *cfs_rq = arg;
  1318. return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
  1319. }
  1320. static struct task_struct *load_balance_next_fair(void *arg)
  1321. {
  1322. struct cfs_rq *cfs_rq = arg;
  1323. return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
  1324. }
  1325. static unsigned long
  1326. __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1327. unsigned long max_load_move, struct sched_domain *sd,
  1328. enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
  1329. struct cfs_rq *cfs_rq)
  1330. {
  1331. struct rq_iterator cfs_rq_iterator;
  1332. cfs_rq_iterator.start = load_balance_start_fair;
  1333. cfs_rq_iterator.next = load_balance_next_fair;
  1334. cfs_rq_iterator.arg = cfs_rq;
  1335. return balance_tasks(this_rq, this_cpu, busiest,
  1336. max_load_move, sd, idle, all_pinned,
  1337. this_best_prio, &cfs_rq_iterator);
  1338. }
  1339. #ifdef CONFIG_FAIR_GROUP_SCHED
  1340. static unsigned long
  1341. load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1342. unsigned long max_load_move,
  1343. struct sched_domain *sd, enum cpu_idle_type idle,
  1344. int *all_pinned, int *this_best_prio)
  1345. {
  1346. long rem_load_move = max_load_move;
  1347. int busiest_cpu = cpu_of(busiest);
  1348. struct task_group *tg;
  1349. rcu_read_lock();
  1350. update_h_load(busiest_cpu);
  1351. list_for_each_entry_rcu(tg, &task_groups, list) {
  1352. struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
  1353. unsigned long busiest_h_load = busiest_cfs_rq->h_load;
  1354. unsigned long busiest_weight = busiest_cfs_rq->load.weight;
  1355. u64 rem_load, moved_load;
  1356. /*
  1357. * empty group
  1358. */
  1359. if (!busiest_cfs_rq->task_weight)
  1360. continue;
  1361. rem_load = (u64)rem_load_move * busiest_weight;
  1362. rem_load = div_u64(rem_load, busiest_h_load + 1);
  1363. moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
  1364. rem_load, sd, idle, all_pinned, this_best_prio,
  1365. tg->cfs_rq[busiest_cpu]);
  1366. if (!moved_load)
  1367. continue;
  1368. moved_load *= busiest_h_load;
  1369. moved_load = div_u64(moved_load, busiest_weight + 1);
  1370. rem_load_move -= moved_load;
  1371. if (rem_load_move < 0)
  1372. break;
  1373. }
  1374. rcu_read_unlock();
  1375. return max_load_move - rem_load_move;
  1376. }
  1377. #else
  1378. static unsigned long
  1379. load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1380. unsigned long max_load_move,
  1381. struct sched_domain *sd, enum cpu_idle_type idle,
  1382. int *all_pinned, int *this_best_prio)
  1383. {
  1384. return __load_balance_fair(this_rq, this_cpu, busiest,
  1385. max_load_move, sd, idle, all_pinned,
  1386. this_best_prio, &busiest->cfs);
  1387. }
  1388. #endif
  1389. static int
  1390. move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1391. struct sched_domain *sd, enum cpu_idle_type idle)
  1392. {
  1393. struct cfs_rq *busy_cfs_rq;
  1394. struct rq_iterator cfs_rq_iterator;
  1395. cfs_rq_iterator.start = load_balance_start_fair;
  1396. cfs_rq_iterator.next = load_balance_next_fair;
  1397. for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
  1398. /*
  1399. * pass busy_cfs_rq argument into
  1400. * load_balance_[start|next]_fair iterators
  1401. */
  1402. cfs_rq_iterator.arg = busy_cfs_rq;
  1403. if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
  1404. &cfs_rq_iterator))
  1405. return 1;
  1406. }
  1407. return 0;
  1408. }
  1409. #endif /* CONFIG_SMP */
  1410. /*
  1411. * scheduler tick hitting a task of our scheduling class:
  1412. */
  1413. static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
  1414. {
  1415. struct cfs_rq *cfs_rq;
  1416. struct sched_entity *se = &curr->se;
  1417. for_each_sched_entity(se) {
  1418. cfs_rq = cfs_rq_of(se);
  1419. entity_tick(cfs_rq, se, queued);
  1420. }
  1421. }
  1422. /*
  1423. * Share the fairness runtime between parent and child, so that the
  1424. * total amount of CPU pressure stays equal - new tasks
  1425. * get a chance to run but frequent forkers are not allowed to
  1426. * monopolize the CPU. Note: the parent runqueue is locked,
  1427. * the child is not running yet.
  1428. */
  1429. static void task_new_fair(struct rq *rq, struct task_struct *p)
  1430. {
  1431. struct cfs_rq *cfs_rq = task_cfs_rq(p);
  1432. struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
  1433. int this_cpu = smp_processor_id();
  1434. sched_info_queued(p);
  1435. update_curr(cfs_rq);
  1436. place_entity(cfs_rq, se, 1);
  1437. /* 'curr' will be NULL if the child belongs to a different group */
  1438. if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
  1439. curr && entity_before(curr, se)) {
  1440. /*
  1441. * Upon rescheduling, sched_class::put_prev_task() will place
  1442. * 'current' within the tree based on its new key value.
  1443. */
  1444. swap(curr->vruntime, se->vruntime);
  1445. resched_task(rq->curr);
  1446. }
  1447. enqueue_task_fair(rq, p, 0);
  1448. }
  1449. /*
  1450. * Priority of the task has changed. Check to see if we preempt
  1451. * the current task.
  1452. */
  1453. static void prio_changed_fair(struct rq *rq, struct task_struct *p,
  1454. int oldprio, int running)
  1455. {
  1456. /*
  1457. * Reschedule if we are currently running on this runqueue and
  1458. * our priority decreased, or if we are not currently running on
  1459. * this runqueue and our priority is higher than the current's
  1460. */
  1461. if (running) {
  1462. if (p->prio > oldprio)
  1463. resched_task(rq->curr);
  1464. } else
  1465. check_preempt_curr(rq, p, 0);
  1466. }
  1467. /*
  1468. * We switched to the sched_fair class.
  1469. */
  1470. static void switched_to_fair(struct rq *rq, struct task_struct *p,
  1471. int running)
  1472. {
  1473. /*
  1474. * We were most likely switched from sched_rt, so
  1475. * kick off the schedule if running, otherwise just see
  1476. * if we can still preempt the current task.
  1477. */
  1478. if (running)
  1479. resched_task(rq->curr);
  1480. else
  1481. check_preempt_curr(rq, p, 0);
  1482. }
  1483. /* Account for a task changing its policy or group.
  1484. *
  1485. * This routine is mostly called to set cfs_rq->curr field when a task
  1486. * migrates between groups/classes.
  1487. */
  1488. static void set_curr_task_fair(struct rq *rq)
  1489. {
  1490. struct sched_entity *se = &rq->curr->se;
  1491. for_each_sched_entity(se)
  1492. set_next_entity(cfs_rq_of(se), se);
  1493. }
  1494. #ifdef CONFIG_FAIR_GROUP_SCHED
  1495. static void moved_group_fair(struct task_struct *p)
  1496. {
  1497. struct cfs_rq *cfs_rq = task_cfs_rq(p);
  1498. update_curr(cfs_rq);
  1499. place_entity(cfs_rq, &p->se, 1);
  1500. }
  1501. #endif
  1502. /*
  1503. * All the scheduling class methods:
  1504. */
  1505. static const struct sched_class fair_sched_class = {
  1506. .next = &idle_sched_class,
  1507. .enqueue_task = enqueue_task_fair,
  1508. .dequeue_task = dequeue_task_fair,
  1509. .yield_task = yield_task_fair,
  1510. .check_preempt_curr = check_preempt_wakeup,
  1511. .pick_next_task = pick_next_task_fair,
  1512. .put_prev_task = put_prev_task_fair,
  1513. #ifdef CONFIG_SMP
  1514. .select_task_rq = select_task_rq_fair,
  1515. .load_balance = load_balance_fair,
  1516. .move_one_task = move_one_task_fair,
  1517. #endif
  1518. .set_curr_task = set_curr_task_fair,
  1519. .task_tick = task_tick_fair,
  1520. .task_new = task_new_fair,
  1521. .prio_changed = prio_changed_fair,
  1522. .switched_to = switched_to_fair,
  1523. #ifdef CONFIG_FAIR_GROUP_SCHED
  1524. .moved_group = moved_group_fair,
  1525. #endif
  1526. };
  1527. #ifdef CONFIG_SCHED_DEBUG
  1528. static void print_cfs_stats(struct seq_file *m, int cpu)
  1529. {
  1530. struct cfs_rq *cfs_rq;
  1531. rcu_read_lock();
  1532. for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
  1533. print_cfs_rq(m, cpu, cfs_rq);
  1534. rcu_read_unlock();
  1535. }
  1536. #endif