sched_fair.c

/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;
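
/*
 * Editor's example (illustrative, not in the original source): on an
 * 8-CPU machine the default LOG scaling gives a factor of
 * 1 + ilog2(8) = 4, so the effective sched_latency becomes
 * 4 * 6ms = 24ms; NONE would leave it at 6ms and LINEAR would scale
 * it to 8 * 6ms = 48ms.
 */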
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 8;
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}
/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * A preemption test can only be made between sibling entities
         * that are in the same cfs_rq, i.e. that have a common parent.
         * Walk up the hierarchy of both tasks until we find ancestors
         * that are siblings under a common parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}
#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                                   unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}
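
/*
 * Editor's example (illustrative, not in the original source): the
 * comparisons above subtract first and cast to s64 so they stay correct
 * when the u64 vruntime counters wrap. With a->vruntime = 2 and
 * b->vruntime = ULLONG_MAX, the difference (u64)(2 - ULLONG_MAX) == 3,
 * which is positive as s64, so 'a' is correctly ordered after 'b' even
 * though a < b numerically.
 */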
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
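
/*
 * Editor's note (illustrative, not in the original source): the final
 * max_vruntime() keeps min_vruntime monotonically non-decreasing - e.g.
 * if min_vruntime is 100ms and the smallest runnable vruntime is 90ms
 * (a freshly placed sleeper), min_vruntime stays at 100ms rather than
 * moving backwards. On 32-bit, the copy plus write barrier lets lockless
 * readers detect a torn 64-bit update.
 */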
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
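
/*
 * Editor's note (illustrative, not in the original source): keeping the
 * timeline in an rbtree keyed on vruntime makes enqueue/dequeue O(log n),
 * while the cached rb_leftmost pointer lets the scheduler pick the entity
 * with the smallest vruntime in O(1) on every schedule, without walking
 * down the tree.
 */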
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}
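
/*
 * Editor's example (illustrative, not in the original source):
 * calc_delta_mine() scales delta by NICE_0_LOAD/weight, so a nice-0 task
 * (weight 1024) accrues vruntime at wall-clock rate, while a task of
 * weight 2048 sees only delta * 1024/2048, i.e. half the vruntime per
 * unit of real runtime - heavier tasks therefore stay "behind" in the
 * timeline longer and get to run more.
 */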
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
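
/*
 * Editor's example (illustrative, not in the original source): with the
 * unscaled defaults above (l = 6ms, min granularity = 0.75ms, nl = 8),
 * 4 runnable tasks keep the period at 6ms, while 16 runnable tasks
 * stretch it to 16 * 0.75ms = 12ms so that no slice drops below the
 * minimum granularity.
 */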
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = calc_delta_mine(slice, se->load.weight, load);
        }
        return slice;
}
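
/*
 * Editor's example (illustrative, not in the original source): for two
 * runnable tasks A (weight 1024) and B (weight 2048) sharing a 6ms
 * period, the slices come out as s_A = 6ms * 1024/3072 = 2ms and
 * s_B = 6ms * 2048/3072 = 4ms; with group scheduling the loop repeats
 * this weighting at every level of the hierarchy.
 */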
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq);
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->statistics.exec_max,
                      max((u64)delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);

        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
        cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock_task;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_of(cfs_rq)->clock - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock_task;
}
/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
        cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
                list_add(&se->group_node, &cfs_rq->tasks);
        }
        cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
                list_del_init(&se->group_node);
        }
        cfs_rq->nr_running--;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/* we need this in update_cfs_load and load-balance functions below */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
                                            int global_update)
{
        struct task_group *tg = cfs_rq->tg;
        long load_avg;

        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period + 1);
        load_avg -= cfs_rq->load_contribution;

        if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
                atomic_add(load_avg, &tg->load_weight);
                cfs_rq->load_contribution += load_avg;
        }
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
        u64 period = sysctl_sched_shares_window;
        u64 now, delta;
        unsigned long load = cfs_rq->load.weight;

        if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
                return;

        now = rq_of(cfs_rq)->clock_task;
        delta = now - cfs_rq->load_stamp;

        /* truncate load history at 4 idle periods */
        if (cfs_rq->load_stamp > cfs_rq->load_last &&
            now - cfs_rq->load_last > 4 * period) {
                cfs_rq->load_period = 0;
                cfs_rq->load_avg = 0;
                delta = period - 1;
        }

        cfs_rq->load_stamp = now;
        cfs_rq->load_unacc_exec_time = 0;
        cfs_rq->load_period += delta;
        if (load) {
                cfs_rq->load_last = now;
                cfs_rq->load_avg += delta * load;
        }

        /* consider updating load contribution on each fold or truncate */
        if (global_update || cfs_rq->load_period > period
            || !cfs_rq->load_period)
                update_cfs_rq_load_contribution(cfs_rq, global_update);

        while (cfs_rq->load_period > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (cfs_rq->load_period));
                cfs_rq->load_period /= 2;
                cfs_rq->load_avg /= 2;
        }

        if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
                list_del_leaf_cfs_rq(cfs_rq);
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        long load_weight, load, shares;

        load = cfs_rq->load.weight;

        load_weight = atomic_read(&tg->load_weight);
        load_weight += load;
        load_weight -= cfs_rq->load_contribution;

        shares = (tg->shares * load);
        if (load_weight)
                shares /= load_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
                shares = tg->shares;

        return shares;
}
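
/*
 * Editor's example (illustrative, not in the original source): a group
 * with tg->shares = 1024 whose load is split 3:1 between two CPUs gets
 * its per-CPU entity weights set to roughly 768 and 256; the clamps keep
 * each weight between MIN_SHARES and the full tg->shares even when the
 * averaged load figures are stale.
 */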
static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
                update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq);
        }
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
{
        if (se->on_rq) {
                /* commit outstanding execution time */
                if (cfs_rq->curr == se)
                        update_curr(cfs_rq);
                account_entity_dequeue(cfs_rq, se);
        }

        update_load_set(&se->load, weight);

        if (se->on_rq)
                account_entity_enqueue(cfs_rq, se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
        struct task_group *tg;
        struct sched_entity *se;
        long shares;

        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se || throttled_hierarchy(cfs_rq))
                return;
#ifndef CONFIG_SMP
        if (likely(se->load.weight == tg->shares))
                return;
#endif
        shares = calc_cfs_shares(cfs_rq, tg);

        reweight_entity(cfs_rq_of(se), se, shares);
}

#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        struct task_struct *tsk = NULL;

        if (entity_is_task(se))
                tsk = task_of(se);

        if (se->statistics.sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.sleep_max))
                        se->statistics.sleep_max = delta;

                se->statistics.sleep_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
        if (se->statistics.block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.block_max))
                        se->statistics.block_max = delta;

                se->statistics.block_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        if (tsk->in_iowait) {
                                se->statistics.iowait_sum += delta;
                                se->statistics.iowait_count++;
                                trace_sched_stat_iowait(tsk, delta);
                        }

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                                (void *)get_wchan(tsk),
                                                delta >> 20);
                        }
                        account_scheduler_latency(tsk, delta >> 10, 0);
                }
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime = cfs_rq->min_vruntime;

        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a
         * little, place the new task so that it fits in the slot that
         * stays open at the end.
         */
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);

        /* sleeps up to a single latency don't count. */
        if (!initial) {
                unsigned long thresh = sysctl_sched_latency;

                /*
                 * Halve their sleep time's effect, to allow
                 * for a gentler effect of sleepers:
                 */
                if (sched_feat(GENTLE_FAIR_SLEEPERS))
                        thresh >>= 1;

                vruntime -= thresh;
        }

        /* ensure we never gain time by being placed backwards. */
        vruntime = max_vruntime(se->vruntime, vruntime);

        se->vruntime = vruntime;
}
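
/*
 * Editor's example (illustrative, not in the original source): with
 * min_vruntime = 100ms and a 6ms latency, a forked task under
 * START_DEBIT starts one vslice behind (e.g. at 103ms for a 3ms vslice),
 * while a waking sleeper under GENTLE_FAIR_SLEEPERS is credited half a
 * latency and placed at 97ms - enough of a head start to preempt soon
 * without starving the already-runnable tasks.
 */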
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        /*
         * Update the normalized vruntime before updating min_vruntime
         * through calling update_curr().
         */
        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += cfs_rq->min_vruntime;

        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        update_cfs_load(cfs_rq, 0);
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);

        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;

        if (cfs_rq->nr_running == 1) {
                list_add_leaf_cfs_rq(cfs_rq);
                check_enqueue_throttle(cfs_rq);
        }
}
static void __clear_buddies_last(struct sched_entity *se)
{
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
                if (cfs_rq->last == se)
                        cfs_rq->last = NULL;
                else
                        break;
        }
}

static void __clear_buddies_next(struct sched_entity *se)
{
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
                if (cfs_rq->next == se)
                        cfs_rq->next = NULL;
                else
                        break;
        }
}

static void __clear_buddies_skip(struct sched_entity *se)
{
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
                if (cfs_rq->skip == se)
                        cfs_rq->skip = NULL;
                else
                        break;
        }
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->last == se)
                __clear_buddies_last(se);

        if (cfs_rq->next == se)
                __clear_buddies_next(se);

        if (cfs_rq->skip == se)
                __clear_buddies_skip(se);
}
static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->statistics.sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->statistics.block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        clear_buddies(cfs_rq, se);

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        se->on_rq = 0;
        update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);

        /*
         * Normalize the entity after updating the min_vruntime because the
         * update can refer to the ->curr item and we need to reflect this
         * movement in our normalized position.
         */
        if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;

        /* return excess runtime on last dequeue */
        return_cfs_rq_runtime(cfs_rq);

        update_min_vruntime(cfs_rq);
        update_cfs_shares(cfs_rq);
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
                resched_task(rq_of(cfs_rq)->curr);
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
                 */
                clear_buddies(cfs_rq, curr);
                return;
        }

        /*
         * Ensure that a task that missed wakeup preemption by a
         * narrow margin doesn't have to wait for a full slice.
         * This also mitigates buddy induced latencies under load.
         */
        if (delta_exec < sysctl_sched_min_granularity)
                return;

        if (cfs_rq->nr_running > 1) {
                struct sched_entity *se = __pick_first_entity(cfs_rq);
                s64 delta = curr->vruntime - se->vruntime;

                if (delta < 0)
                        return;

                if (delta > ideal_runtime)
                        resched_task(rq_of(cfs_rq)->curr);
        }
}
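
/*
 * Editor's example (illustrative, not in the original source): if a
 * task's fair slice is 4ms and it has already run 5ms since it was last
 * picked, the first test reschedules it; if it has run only 2ms but its
 * vruntime is more than 4ms ahead of the leftmost entity's, the second
 * test reschedules it anyway, so a narrowly missed wakeup preemption is
 * not deferred for a whole slice.
 */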
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->statistics.slice_max = max(se->statistics.slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_first_entity(cfs_rq);
        struct sched_entity *left = se;

        /*
         * Avoid running the skip buddy, if running something else can
         * be done without getting too unfair.
         */
        if (cfs_rq->skip == se) {
                struct sched_entity *second = __pick_next_entity(se);
                if (second && wakeup_preempt_entity(second, left) < 1)
                        se = second;
        }

        /*
         * Prefer last buddy, try to return the CPU to a preempted task.
         */
        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
                se = cfs_rq->last;

        /*
         * Someone really wants this to run. If it's not unfair, run it.
         */
        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
                se = cfs_rq->next;

        clear_buddies(cfs_rq, se);

        return se;
}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        /* throttle cfs_rqs exceeding runtime */
        check_cfs_rq_runtime(cfs_rq);

        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        /*
         * Update share accounting for long-running entities.
         */
        update_entity_shares_tick(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
        if (queued) {
                resched_task(rq_of(cfs_rq)->curr);
                return;
        }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
        if (!sched_feat(DOUBLE_TICK) &&
                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
                return;
#endif

        if (cfs_rq->nr_running > 1)
                check_preempt_tick(cfs_rq, curr);
}
/**************************************************
 * CFS bandwidth control machinery
 */

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */
static inline u64 default_cfs_period(void)
{
        return 100000000ULL;
}

static inline u64 sched_cfs_bandwidth_slice(void)
{
        return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
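
/*
 * Editor's example (illustrative, not in the original source): with a
 * group quota of 20ms per 100ms period and the default 5ms slice, each
 * cfs_rq tops up its local pool in 5ms chunks, so the global pool
 * supports at most four such refills per period before the group's
 * cfs_rqs start getting throttled.
 */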
/*
 * Replenish runtime according to assigned quota and update expiration time.
 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
 * additional synchronization around rq->lock.
 *
 * requires cfs_b->lock
 */
static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
	u64 now;

	if (cfs_b->quota == RUNTIME_INF)
		return;

	now = sched_clock_cpu(smp_processor_id());
	cfs_b->runtime = cfs_b->quota;
	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
}
/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct task_group *tg = cfs_rq->tg;
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
	u64 amount = 0, min_amount, expires;

	/* note: this is a positive sum as runtime_remaining <= 0 */
	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota == RUNTIME_INF)
		amount = min_amount;
	else {
		/*
		 * If the bandwidth pool has become inactive, then at least one
		 * period must have elapsed since the last consumption.
		 * Refresh the global state and ensure the bandwidth timer
		 * becomes active.
		 */
		if (!cfs_b->timer_active) {
			__refill_cfs_bandwidth_runtime(cfs_b);
			__start_cfs_bandwidth(cfs_b);
		}

		if (cfs_b->runtime > 0) {
			amount = min(cfs_b->runtime, min_amount);
			cfs_b->runtime -= amount;
			cfs_b->idle = 0;
		}
	}
	expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	cfs_rq->runtime_remaining += amount;
	/*
	 * we may have advanced our local expiration to account for allowed
	 * spread between our sched_clock and the one on which runtime was
	 * issued.
	 */
	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
		cfs_rq->runtime_expires = expires;

	return cfs_rq->runtime_remaining > 0;
}
/*
 * Note: This depends on the synchronization provided by sched_clock and the
 * fact that rq->clock snapshots this value.
 */
static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct rq *rq = rq_of(cfs_rq);

	/* if the deadline is ahead of our clock, nothing to do */
	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
		return;

	if (cfs_rq->runtime_remaining < 0)
		return;

	/*
	 * If the local deadline has passed we have to consider the
	 * possibility that our sched_clock is 'fast' and the global deadline
	 * has not truly expired.
	 *
	 * Fortunately we can determine whether this is the case by checking
	 * whether the global deadline has advanced.
	 */
	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
		/* extend local deadline, drift is bounded above by 2 ticks */
		cfs_rq->runtime_expires += TICK_NSEC;
	} else {
		/* global deadline is ahead, expiration has passed */
		cfs_rq->runtime_remaining = 0;
	}
}
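
/*
 * Illustrative scenario (not in the original source): the local deadline
 * reads 1000ns but rq->clock says 1010ns because this CPU's sched_clock
 * runs slightly fast. If the global cfs_b->runtime_expires still matches
 * our local copy, no refill has happened, the expiry is only apparent, and
 * the local deadline is pushed out by TICK_NSEC; if the global value has
 * already moved past ours, a refresh really occurred, the remaining local
 * runtime is stale, and it is zeroed.
 */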
static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
				     unsigned long delta_exec)
{
	/* dock delta_exec before expiring quota (as it could span periods) */
	cfs_rq->runtime_remaining -= delta_exec;
	expire_cfs_rq_runtime(cfs_rq);

	if (likely(cfs_rq->runtime_remaining > 0))
		return;

	/*
	 * if we're unable to extend our runtime we resched so that the active
	 * hierarchy can be throttled
	 */
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
		resched_task(rq_of(cfs_rq)->curr);
}

static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
						   unsigned long delta_exec)
{
	if (!cfs_rq->runtime_enabled)
		return;

	__account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return cfs_rq->throttled;
}

/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return cfs_rq->throttle_count;
}

/*
 * Ensure that neither of the group entities corresponding to src_cpu or
 * dest_cpu are members of a throttled hierarchy when performing group
 * load-balance operations.
 */
static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}
/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count--;
#ifdef CONFIG_SMP
	if (!cfs_rq->throttle_count) {
		u64 delta = rq->clock_task - cfs_rq->load_stamp;

		/* leaving throttled state, advance shares averaging windows */
		cfs_rq->load_stamp += delta;
		cfs_rq->load_last += delta;

		/* update entity weight now that we are on_rq again */
		update_cfs_shares(cfs_rq);
	}
#endif

	return 0;
}

static int tg_throttle_down(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	/* group is entering throttled state, record last load */
	if (!cfs_rq->throttle_count)
		update_cfs_load(cfs_rq, 0);
	cfs_rq->throttle_count++;

	return 0;
}
static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, dequeue = 1;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	/* account load preceding throttle */
	rcu_read_lock();
	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
	rcu_read_unlock();

	task_delta = cfs_rq->h_nr_running;
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
		if (!se->on_rq)
			break;

		if (dequeue)
			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
		qcfs_rq->h_nr_running -= task_delta;

		if (qcfs_rq->load.weight)
			dequeue = 0;
	}

	if (!se)
		rq->nr_running -= task_delta;

	cfs_rq->throttled = 1;
	cfs_rq->throttled_timestamp = rq->clock;
	raw_spin_lock(&cfs_b->lock);
	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
	raw_spin_unlock(&cfs_b->lock);
}
static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	int enqueue = 1;
	long task_delta;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	cfs_rq->throttled = 0;
	raw_spin_lock(&cfs_b->lock);
	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
	list_del_rcu(&cfs_rq->throttled_list);
	raw_spin_unlock(&cfs_b->lock);
	cfs_rq->throttled_timestamp = 0;

	update_rq_clock(rq);
	/* update hierarchical throttle state */
	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);

	if (!cfs_rq->load.weight)
		return;

	task_delta = cfs_rq->h_nr_running;
	for_each_sched_entity(se) {
		if (se->on_rq)
			enqueue = 0;

		cfs_rq = cfs_rq_of(se);
		if (enqueue)
			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
		cfs_rq->h_nr_running += task_delta;

		if (cfs_rq_throttled(cfs_rq))
			break;
	}

	if (!se)
		rq->nr_running += task_delta;

	/* determine whether we need to wake up a potentially idle cpu */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_task(rq->curr);
}
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
				  u64 remaining, u64 expires)
{
	struct cfs_rq *cfs_rq;
	u64 runtime = remaining;

	rcu_read_lock();
	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
				throttled_list) {
		struct rq *rq = rq_of(cfs_rq);

		raw_spin_lock(&rq->lock);
		if (!cfs_rq_throttled(cfs_rq))
			goto next;

		runtime = -cfs_rq->runtime_remaining + 1;
		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;

		cfs_rq->runtime_remaining += runtime;
		cfs_rq->runtime_expires = expires;

		/* we check whether we're throttled above */
		if (cfs_rq->runtime_remaining > 0)
			unthrottle_cfs_rq(cfs_rq);

next:
		raw_spin_unlock(&rq->lock);

		if (!remaining)
			break;
	}
	rcu_read_unlock();

	return remaining;
}
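
/*
 * Worked example (illustrative, not in the original source): with 10ms of
 * fresh runtime and two throttled cfs_rqs whose runtime_remaining deficits
 * are -3ms and -2ms, the first is granted exactly 3ms+1ns (bringing it to
 * +1ns) and unthrottled, the second likewise gets 2ms+1ns, and roughly 5ms
 * is handed back to the caller for the global pool.
 */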
/*
 * Responsible for refilling a task_group's bandwidth and unthrottling its
 * cfs_rqs as appropriate. If there has been no activity within the last
 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
 * used to track this state.
 */
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
	u64 runtime, runtime_expires;
	int idle = 1, throttled;

	raw_spin_lock(&cfs_b->lock);
	/* no need to continue the timer with no bandwidth constraint */
	if (cfs_b->quota == RUNTIME_INF)
		goto out_unlock;

	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
	/* idle depends on !throttled (for the case of a large deficit) */
	idle = cfs_b->idle && !throttled;
	cfs_b->nr_periods += overrun;

	/* if we're going inactive then everything else can be deferred */
	if (idle)
		goto out_unlock;

	__refill_cfs_bandwidth_runtime(cfs_b);

	if (!throttled) {
		/* mark as potentially idle for the upcoming period */
		cfs_b->idle = 1;
		goto out_unlock;
	}

	/* account preceding periods in which throttling occurred */
	cfs_b->nr_throttled += overrun;

	/*
	 * There are throttled entities so we must first use the new bandwidth
	 * to unthrottle them before making it generally available. This
	 * ensures that all existing debts will be paid before a new cfs_rq is
	 * allowed to run.
	 */
	runtime = cfs_b->runtime;
	runtime_expires = cfs_b->runtime_expires;
	cfs_b->runtime = 0;

	/*
	 * This check is repeated as we are holding onto the new bandwidth
	 * while we unthrottle. This can potentially race with an unthrottled
	 * group trying to acquire new bandwidth from the global pool.
	 */
	while (throttled && runtime > 0) {
		raw_spin_unlock(&cfs_b->lock);
		/* we can't nest cfs_b->lock while distributing bandwidth */
		runtime = distribute_cfs_runtime(cfs_b, runtime,
						 runtime_expires);
		raw_spin_lock(&cfs_b->lock);

		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
	}

	/* return (any) remaining runtime */
	cfs_b->runtime = runtime;

	/*
	 * While we are ensured activity in the period following an
	 * unthrottle, this also covers the case in which the new bandwidth is
	 * insufficient to cover the existing bandwidth deficit. (Forcing the
	 * timer to remain active while there are any throttled entities.)
	 */
	cfs_b->idle = 0;
out_unlock:
	if (idle)
		cfs_b->timer_active = 0;
	raw_spin_unlock(&cfs_b->lock);

	return idle;
}
/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
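
/*
 * Example of how these interact (illustrative, not in the original
 * source): a dequeuing cfs_rq holding 4ms of local runtime keeps
 * min_cfs_rq_runtime (1ms) for itself and donates 3ms back to the pool.
 * The slack timer is then armed 5ms out, but only if the period timer is
 * not due within 5ms + 2ms = 7ms, since an imminent refresh would make
 * the redistribution pointless.
 */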
/* are we near the end of the current quota period? */
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
	struct hrtimer *refresh_timer = &cfs_b->period_timer;
	u64 remaining;

	/* if the call-back is running a quota refresh is already occurring */
	if (hrtimer_callback_running(refresh_timer))
		return 1;

	/* is a quota refresh about to occur? */
	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
	if (remaining < min_expire)
		return 1;

	return 0;
}
static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;

	/* if there's a quota refresh soon don't bother with slack */
	if (runtime_refresh_within(cfs_b, min_left))
		return;

	start_bandwidth_timer(&cfs_b->slack_timer,
			      ns_to_ktime(cfs_bandwidth_slack_period));
}

/* we know any runtime found here is valid as update_curr() precedes return */
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;

	if (slack_runtime <= 0)
		return;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota != RUNTIME_INF &&
	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
		cfs_b->runtime += slack_runtime;

		/* we are under rq->lock, defer unthrottling using a timer */
		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
		    !list_empty(&cfs_b->throttled_cfs_rq))
			start_cfs_slack_bandwidth(cfs_b);
	}
	raw_spin_unlock(&cfs_b->lock);

	/* even if it's not valid for return we don't want to try again */
	cfs_rq->runtime_remaining -= slack_runtime;
}

static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
		return;

	__return_cfs_rq_runtime(cfs_rq);
}
/*
 * This is done with a timer (instead of inline with bandwidth return) since
 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
 */
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
	u64 expires;

	/* confirm we're still not at a refresh boundary */
	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
		return;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
		runtime = cfs_b->runtime;
		cfs_b->runtime = 0;
	}
	expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
		return;

	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);

	raw_spin_lock(&cfs_b->lock);
	if (expires == cfs_b->runtime_expires)
		cfs_b->runtime = runtime;
	raw_spin_unlock(&cfs_b->lock);
}
/*
 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime since update_curr() throttling cannot trigger until it's on-rq.
 */
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
	/* an active group must be handled by the update_curr()->put() path */
	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
		return;

	/* ensure the group is not already throttled */
	if (cfs_rq_throttled(cfs_rq))
		return;

	/* update runtime allocation */
	account_cfs_rq_runtime(cfs_rq, 0);
	if (cfs_rq->runtime_remaining <= 0)
		throttle_cfs_rq(cfs_rq);
}

/* conditionally throttle active cfs_rq's from put_prev_entity() */
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
		return;

	/*
	 * it's possible for a throttled entity to be forced into a running
	 * state (e.g. set_curr_task), in which case we're finished.
	 */
	if (cfs_rq_throttled(cfs_rq))
		return;

	throttle_cfs_rq(cfs_rq);
}
#else
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
				   unsigned long delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	return 0;
}
#endif
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}
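
/*
 * Worked example (illustrative, not in the original source): if
 * sched_slice() computes a 6ms slice and the entity has already consumed
 * 4ms of it, the hrtimer is armed 2ms out so preemption lands exactly at
 * the slice boundary rather than at the next periodic tick; a negative
 * delta means the slice is already overdrawn and the task is resched'd
 * immediately.
 */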
/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running increment below.
		 */
		if (cfs_rq_throttled(cfs_rq))
			break;
		cfs_rq->h_nr_running++;

		flags = ENQUEUE_WAKEUP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_nr_running++;

		if (cfs_rq_throttled(cfs_rq))
			break;

		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}

	if (!se)
		inc_nr_running(rq);
	hrtick_update(rq);
}
static void set_next_buddy(struct sched_entity *se);

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running decrement below.
		 */
		if (cfs_rq_throttled(cfs_rq))
			break;
		cfs_rq->h_nr_running--;

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && parent_entity(se))
				set_next_buddy(parent_entity(se));

			/* avoid re-evaluating load for this entity */
			se = parent_entity(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_nr_running--;

		if (cfs_rq_throttled(cfs_rq))
			break;

		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}

	if (!se)
		dec_nr_running(rq);
	hrtick_update(rq);
}
#ifdef CONFIG_SMP

static void task_waking_fair(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 min_vruntime;

#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;

	do {
		min_vruntime_copy = cfs_rq->min_vruntime_copy;
		smp_rmb();
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != min_vruntime_copy);
#else
	min_vruntime = cfs_rq->min_vruntime;
#endif

	se->vruntime -= min_vruntime;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 */
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	for_each_sched_entity(se) {
		long lw, w;

		tg = se->my_q->tg;
		w = se->my_q->load.weight;

		/* use this cpu's instantaneous contribution */
		lw = atomic_read(&tg->load_weight);
		lw -= se->my_q->load_contribution;
		lw += w + wg;

		wl += w;

		if (lw > 0 && wl < lw)
			wl = (wl * tg->shares) / lw;
		else
			wl = tg->shares;

		/* zero point is MIN_SHARES */
		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		wl -= se->load.weight;
		wg = 0;
	}

	return wl;
}
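
/*
 * Worked example (illustrative, not in the original source): consider a
 * group with tg->shares = 2048 whose queue weight is split 1024/1024
 * across two cpus, so each group entity currently carries 1024. Waking a
 * weight-1024 task here (wl = wg = 1024) gives lw = 2048 - 1024 + 1024 +
 * 1024 = 3072 and wl = 1024 + 1024 = 2048, so the entity's new share is
 * 2048 * 2048 / 3072 = 1365; subtracting its old weight of 1024 leaves an
 * effective load change of +341 as seen from the root.
 */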
#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
					   unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	s64 this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if the two cpus are near enough in load to allow
	 * this task to be woken on this_cpu.
	 */
	if (this_load > 0) {
		s64 this_eff_load, prev_eff_load;

		this_eff_load = 100;
		this_eff_load *= power_of(prev_cpu);
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
		prev_eff_load *= power_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

		balanced = this_eff_load <= prev_eff_load;
	} else
		balanced = true;

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct - 100) / 2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;

		if (local_group) {
			this_load = avg_load;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}
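
/*
 * Worked example (illustrative, not in the original source): with a
 * common imbalance_pct of 125, the threshold above becomes 100 + 25/2 =
 * 112, so a remote group only wins when 100 * this_load >= 112 * min_load,
 * i.e. the local group must carry roughly 12% more load before the task
 * is placed remotely; otherwise NULL is returned and placement stays
 * local.
 */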
/*
 * find_idlest_cpu - find the idlest cpu among the cpus in the group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}
/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int select_idle_sibling(struct task_struct *p, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	struct sched_domain *sd;
	int i;

	/*
	 * If the task is going to be woken-up on this cpu and if it is
	 * already idle, then it is the right target.
	 */
	if (target == cpu && idle_cpu(cpu))
		return cpu;

	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
	if (target == prev_cpu && idle_cpu(prev_cpu))
		return prev_cpu;

	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
	rcu_read_lock();
	for_each_domain(target, sd) {
		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
			break;

		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
			if (idle_cpu(i)) {
				target = i;
				break;
			}
		}

		/*
		 * Stop looking for an idle sibling once we reach the domain
		 * that spans both the current cpu and prev_cpu.
		 */
		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
			break;
	}
	rcu_read_unlock();

	return target;
}
/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_WAKE,
 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
			want_affine = 0;
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (affine_sd) {
		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
			prev_cpu = cpu;

		new_cpu = select_idle_sibling(p, prev_cpu);
		goto unlock;
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = sd->span_weight;
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}
unlock:
	rcu_read_unlock();

	return new_cpu;
}
#endif /* CONFIG_SMP */
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since 'curr' is running now, convert the granularity from
	 * real-time to virtual-time in se's units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter task; if, on the other hand, 'se' > 'curr' then the
	 * resulting gran will be smaller, again penalizing the lighter
	 * task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	return calc_delta_fair(gran, se);
}
/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
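
/*
 * Worked example (illustrative, not in the original source): assuming a
 * wakeup granularity of 1ms and a nice-0 'se', calc_delta_fair() leaves
 * gran at ~1000000ns. If curr->vruntime is 1.5ms ahead of se->vruntime,
 * vdiff > gran and we return 1 (preempt; the s3 case in the diagram); at
 * 0.5ms ahead we return 0 (too close to call; the s2 case); and if se is
 * not to the left of curr at all, vdiff <= 0 returns -1 (the s1 case).
 */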
static void set_last_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}

static void set_skip_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		cfs_rq_of(se)->skip = se;
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;
	int next_buddy_marked = 0;

	if (unlikely(se == pse))
		return;

	/*
	 * This is possible from callers such as pull_task(), in which we
	 * unconditionally check_preempt_curr() after an enqueue (which may
	 * have led to a throttle). This both saves work and prevents false
	 * next-buddy nomination below.
	 */
	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
		set_next_buddy(pse);
		next_buddy_marked = 1;
	}

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 *
	 * Note: this also catches the edge-case of curr being in a throttled
	 * group (e.g. via set_curr_task), since update_curr() (in the
	 * enqueue of curr) will have resulted in resched being set. This
	 * prevents us from potentially nominating it as a false LAST_BUDDY
	 * below.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/* Idle tasks are by definition preempted by non-idle tasks. */
	if (unlikely(curr->policy == SCHED_IDLE) &&
	    likely(p->policy != SCHED_IDLE))
		goto preempt;

	/*
	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
	 * is driven by the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	find_matching_se(&se, &pse);
	update_curr(cfs_rq_of(se));
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1) {
		/*
		 * Bias pick_next to pick the sched entity that is
		 * triggering this preemption.
		 */
		if (!next_buddy_marked)
			set_next_buddy(pse);
		goto preempt;
	}

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class;
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}
/*
 * sched_yield() is very simple
 *
 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);
	}

	set_skip_buddy(se);
}

static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
{
	struct sched_entity *se = &p->se;

	/* throttled hierarchies are not runnable */
	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
		return false;

	/* Tell the scheduler that we'd really like pse to run next. */
	set_next_buddy(se);

	yield_task_fair(rq);

	return true;
}
#ifdef CONFIG_SMP

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	check_preempt_curr(this_rq, p, 0);
}
/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		     struct sched_domain *sd, enum cpu_idle_type idle,
		     int *all_pinned)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
		return 0;
	}
	*all_pinned = 0;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, rq->clock_task, sd);
	if (!tsk_cache_hot ||
	    sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}
/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct task_struct *p, *n;
	struct cfs_rq *cfs_rq;
	int pinned = 0;

	for_each_leaf_cfs_rq(busiest, cfs_rq) {
		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
			if (throttled_lb_pair(task_group(p),
					      busiest->cpu, this_cpu))
				break;

			if (!can_migrate_task(p, busiest, this_cpu,
					      sd, idle, &pinned))
				continue;

			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);
			return 1;
		}
	}

	return 0;
}
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      unsigned long max_load_move, struct sched_domain *sd,
	      enum cpu_idle_type idle, int *all_pinned,
	      struct cfs_rq *busiest_cfs_rq)
{
	int loops = 0, pulled = 0;
	long rem_load_move = max_load_move;
	struct task_struct *p, *n;

	if (max_load_move == 0)
		goto out;

	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
		if (loops++ > sysctl_sched_nr_migrate)
			break;

		if ((p->se.load.weight >> 1) > rem_load_move ||
		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
				      all_pinned))
			continue;

		pull_task(busiest, p, this_rq, this_cpu);
		pulled++;
		rem_load_move -= p->se.load.weight;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE)
			break;
#endif

		/*
		 * We only want to steal up to the prescribed amount of
		 * weighted load.
		 */
		if (rem_load_move <= 0)
			break;
	}
out:
	/*
	 * Right now, this is one of only two places pull_task() is called,
	 * so we can safely collect pull_task() stats here rather than
	 * inside pull_task().
	 */
	schedstat_add(sd, lb_gained[idle], pulled);

	return max_load_move - rem_load_move;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
static int update_shares_cpu(struct task_group *tg, int cpu)
{
	struct cfs_rq *cfs_rq;
	unsigned long flags;
	struct rq *rq;

	if (!tg->se[cpu])
		return 0;

	rq = cpu_rq(cpu);
	cfs_rq = tg->cfs_rq[cpu];

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);
	update_cfs_load(cfs_rq, 1);

	/*
	 * We need to update shares after updating tg->load_weight in
	 * order to adjust the weight of groups with long running tasks.
	 */
	update_cfs_shares(cfs_rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return 0;
}

static void update_shares(int cpu)
{
	struct cfs_rq *cfs_rq;
	struct rq *rq = cpu_rq(cpu);

	rcu_read_lock();
	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	for_each_leaf_cfs_rq(rq, cfs_rq) {
		/* throttled entities do not contribute to load */
		if (throttled_hierarchy(cfs_rq))
			continue;

		update_shares_cpu(cfs_rq->tg, cpu);
	}
	rcu_read_unlock();
}
/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->se[cpu]->load.weight;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}
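
/*
 * Worked example (illustrative, not in the original source): if the root
 * runqueue weight on this cpu is 3072 and a child group's entity carries
 * weight 1024 within that root cfs_rq, the child's h_load becomes
 * 3072 * 1024 / (3072 + 1) ~= 1023, i.e. roughly one third of the cpu's
 * load is attributed to that group; a grandchild repeats the same scaling
 * against its parent's h_load.
 */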
static void update_h_load(long cpu)
{
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned)
{
	long rem_load_move = max_load_move;
	struct cfs_rq *busiest_cfs_rq;

	rcu_read_lock();
	update_h_load(cpu_of(busiest));

	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group or part of a throttled hierarchy
		 */
		if (!busiest_cfs_rq->task_weight ||
		    throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = balance_tasks(this_rq, this_cpu, busiest,
					   rem_load, sd, idle, all_pinned,
					   busiest_cfs_rq);
		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
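
/*
 * Worked example of the two scalings above (illustrative, not in the
 * original source): to move 512 units of hierarchical load out of a group
 * whose local cfs_rq weight is 2048 but whose h_load is only 1024, the
 * target is first inflated to 512 * 2048 / (1024 + 1) ~= 1023 units of
 * local weight; if balance_tasks() then pulls 1023, that is deflated back
 * to 1023 * 1024 / (2048 + 1) ~= 511 units of root-level load.
 */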
#else
static inline void update_shares(int cpu)
{
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned)
{
	return balance_tasks(this_rq, this_cpu, busiest,
			     max_load_move, sd, idle, all_pinned,
			     &busiest->cfs);
}
#endif
/*
 * move_tasks tries to move up to max_load_move weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *all_pinned)
{
	unsigned long total_load_moved = 0, load_moved;

	do {
		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, all_pinned);

		total_load_moved += load_moved;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
			break;

		if (raw_spin_is_contended(&this_rq->lock) ||
		    raw_spin_is_contended(&busiest->lock))
			break;
#endif
	} while (load_moved && max_load_move > total_load_moved);

	return total_load_moved > 0;
}
/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		 during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest; /* Busiest group in this sd */
	struct sched_group *this;    /* Local group in this sd */
	unsigned long total_load;    /* Total load of all groups in sd */
	unsigned long total_pwr;     /* Total power of all groups in sd */
	unsigned long avg_load;	     /* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;
	unsigned long this_has_capacity;
	unsigned int  this_idle_cpus;

	/* Statistics of the busiest group */
	unsigned int  busiest_idle_cpus;
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;
	unsigned long busiest_group_capacity;
	unsigned long busiest_has_capacity;
	unsigned int  busiest_group_weight;

	int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	int power_savings_balance; /* Is powersave balance needed for this sd */
	struct sched_group *group_min; /* Least loaded group in sd */
	struct sched_group *group_leader; /* Group which relieves group_min */
	unsigned long min_load_per_task; /* load_per_task in group_min */
	unsigned long leader_nr_running; /* Nr running of group_leader */
	unsigned long min_nr_running; /* Nr running of group_min */
#endif
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
	unsigned long idle_cpus;
	unsigned long group_weight;
	int group_imb; /* Is there an imbalance in the group ? */
	int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for which the sd load_idx is obtained.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
				  enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}
  2579. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  2580. /**
  2581. * init_sd_power_savings_stats - Initialize power savings statistics for
  2582. * the given sched_domain, during load balancing.
  2583. *
  2584. * @sd: Sched domain whose power-savings statistics are to be initialized.
  2585. * @sds: Variable containing the statistics for sd.
  2586. * @idle: Idle status of the CPU at which we're performing load-balancing.
  2587. */
  2588. static inline void init_sd_power_savings_stats(struct sched_domain *sd,
  2589. struct sd_lb_stats *sds, enum cpu_idle_type idle)
  2590. {
  2591. /*
  2592. * Busy processors will not participate in power savings
  2593. * balance.
  2594. */
  2595. if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
  2596. sds->power_savings_balance = 0;
  2597. else {
  2598. sds->power_savings_balance = 1;
  2599. sds->min_nr_running = ULONG_MAX;
  2600. sds->leader_nr_running = 0;
  2601. }
  2602. }
  2603. /**
  2604. * update_sd_power_savings_stats - Update the power saving stats for a
  2605. * sched_domain while performing load balancing.
  2606. *
  2607. * @group: sched_group belonging to the sched_domain under consideration.
  2608. * @sds: Variable containing the statistics of the sched_domain
  2609. * @local_group: Does group contain the CPU for which we're performing
  2610. * load balancing ?
  2611. * @sgs: Variable containing the statistics of the group.
  2612. */
  2613. static inline void update_sd_power_savings_stats(struct sched_group *group,
  2614. struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
  2615. {
  2616. if (!sds->power_savings_balance)
  2617. return;
  2618. /*
  2619. * If the local group is idle or completely loaded
  2620. * no need to do power savings balance at this domain
  2621. */
  2622. if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
  2623. !sds->this_nr_running))
  2624. sds->power_savings_balance = 0;
  2625. /*
  2626. * If a group is already running at full capacity or idle,
  2627. * don't include that group in power savings calculations
  2628. */
  2629. if (!sds->power_savings_balance ||
  2630. sgs->sum_nr_running >= sgs->group_capacity ||
  2631. !sgs->sum_nr_running)
  2632. return;
  2633. /*
  2634. * Calculate the group which has the least non-idle load.
  2635. * This is the group from where we need to pick up the load
  2636. * for saving power
  2637. */
  2638. if ((sgs->sum_nr_running < sds->min_nr_running) ||
  2639. (sgs->sum_nr_running == sds->min_nr_running &&
  2640. group_first_cpu(group) > group_first_cpu(sds->group_min))) {
  2641. sds->group_min = group;
  2642. sds->min_nr_running = sgs->sum_nr_running;
  2643. sds->min_load_per_task = sgs->sum_weighted_load /
  2644. sgs->sum_nr_running;
  2645. }
  2646. /*
2647. * Calculate the group which is nearly at its capacity
2648. * but still has some space to pick up load from another
2649. * group and save more power.
  2650. */
  2651. if (sgs->sum_nr_running + 1 > sgs->group_capacity)
  2652. return;
  2653. if (sgs->sum_nr_running > sds->leader_nr_running ||
  2654. (sgs->sum_nr_running == sds->leader_nr_running &&
  2655. group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
  2656. sds->group_leader = group;
  2657. sds->leader_nr_running = sgs->sum_nr_running;
  2658. }
  2659. }
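/*
 * Editorial sketch of the selection above (values assumed): given two
 * non-local groups A (1 task) and B (3 tasks), each with capacity 4,
 * A becomes group_min (fewest running tasks, the source of the pull)
 * and B becomes group_leader (most loaded group that still has room
 * for one more task, the destination). The first-cpu tie-breaks push
 * the "min" role toward higher-numbered groups and the "leader" role
 * toward lower-numbered ones, so load tends to be packed downwards.
 */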
  2660. /**
  2661. * check_power_save_busiest_group - see if there is potential for some power-savings balance
  2662. * @sds: Variable containing the statistics of the sched_domain
  2663. * under consideration.
  2664. * @this_cpu: Cpu at which we're currently performing load-balancing.
  2665. * @imbalance: Variable to store the imbalance.
  2666. *
  2667. * Description:
  2668. * Check if we have potential to perform some power-savings balance.
  2669. * If yes, set the busiest group to be the least loaded group in the
2670. * sched_domain, so that its CPUs can be put to idle.
  2671. *
  2672. * Returns 1 if there is potential to perform power-savings balance.
  2673. * Else returns 0.
  2674. */
  2675. static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  2676. int this_cpu, unsigned long *imbalance)
  2677. {
  2678. if (!sds->power_savings_balance)
  2679. return 0;
  2680. if (sds->this != sds->group_leader ||
  2681. sds->group_leader == sds->group_min)
  2682. return 0;
  2683. *imbalance = sds->min_load_per_task;
  2684. sds->busiest = sds->group_min;
  2685. return 1;
  2686. }
  2687. #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
  2688. static inline void init_sd_power_savings_stats(struct sched_domain *sd,
  2689. struct sd_lb_stats *sds, enum cpu_idle_type idle)
  2690. {
  2691. return;
  2692. }
  2693. static inline void update_sd_power_savings_stats(struct sched_group *group,
  2694. struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
  2695. {
  2696. return;
  2697. }
  2698. static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  2699. int this_cpu, unsigned long *imbalance)
  2700. {
  2701. return 0;
  2702. }
  2703. #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
  2704. unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
  2705. {
  2706. return SCHED_POWER_SCALE;
  2707. }
  2708. unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
  2709. {
  2710. return default_scale_freq_power(sd, cpu);
  2711. }
  2712. unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
  2713. {
  2714. unsigned long weight = sd->span_weight;
  2715. unsigned long smt_gain = sd->smt_gain;
  2716. smt_gain /= weight;
  2717. return smt_gain;
  2718. }
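/*
 * Editorial worked example (values assumed, not from this file): with
 * the conventional smt_gain of 1178 (~1.15 * SCHED_POWER_SCALE) and a
 * sibling domain spanning 2 hardware threads, each thread is credited
 * 1178 / 2 = 589 units of cpu_power, i.e. two threads together are
 * worth about 15% more than one full core (1024) rather than twice it.
 */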
  2719. unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
  2720. {
  2721. return default_scale_smt_power(sd, cpu);
  2722. }
  2723. unsigned long scale_rt_power(int cpu)
  2724. {
  2725. struct rq *rq = cpu_rq(cpu);
  2726. u64 total, available;
  2727. total = sched_avg_period() + (rq->clock - rq->age_stamp);
  2728. if (unlikely(total < rq->rt_avg)) {
  2729. /* Ensures that power won't end up being negative */
  2730. available = 0;
  2731. } else {
  2732. available = total - rq->rt_avg;
  2733. }
  2734. if (unlikely((s64)total < SCHED_POWER_SCALE))
  2735. total = SCHED_POWER_SCALE;
  2736. total >>= SCHED_POWER_SHIFT;
  2737. return div_u64(available, total);
  2738. }
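/*
 * Editorial worked example: suppose total works out to one full
 * averaging period and rt_avg accounts for a quarter of it. Then
 * available = 3/4 * total, total >> SCHED_POWER_SHIFT yields
 * total / 1024, and the function returns roughly
 * (3/4 * total) / (total / 1024) = 768, i.e. 75% of SCHED_POWER_SCALE
 * is left over for CFS tasks on this cpu.
 */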
  2739. static void update_cpu_power(struct sched_domain *sd, int cpu)
  2740. {
  2741. unsigned long weight = sd->span_weight;
  2742. unsigned long power = SCHED_POWER_SCALE;
  2743. struct sched_group *sdg = sd->groups;
  2744. if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
  2745. if (sched_feat(ARCH_POWER))
  2746. power *= arch_scale_smt_power(sd, cpu);
  2747. else
  2748. power *= default_scale_smt_power(sd, cpu);
  2749. power >>= SCHED_POWER_SHIFT;
  2750. }
  2751. sdg->sgp->power_orig = power;
  2752. if (sched_feat(ARCH_POWER))
  2753. power *= arch_scale_freq_power(sd, cpu);
  2754. else
  2755. power *= default_scale_freq_power(sd, cpu);
  2756. power >>= SCHED_POWER_SHIFT;
  2757. power *= scale_rt_power(cpu);
  2758. power >>= SCHED_POWER_SHIFT;
  2759. if (!power)
  2760. power = 1;
  2761. cpu_rq(cpu)->cpu_power = power;
  2762. sdg->sgp->power = power;
  2763. }
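/*
 * Editorial note: the scalings above compose multiplicatively.
 * Starting from SCHED_POWER_SCALE (1024), power is scaled by the SMT
 * factor (for sibling domains), then by the frequency factor, then by
 * the fraction of time left over from RT activity, shifting right by
 * SCHED_POWER_SHIFT after each step to stay in fixed point. E.g. an
 * SMT sibling credited 589, with ~25% of its time eaten by RT work
 * (scale_rt_power = 768), ends up with 589 * 768 / 1024 ~= 441.
 */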
  2764. static void update_group_power(struct sched_domain *sd, int cpu)
  2765. {
  2766. struct sched_domain *child = sd->child;
  2767. struct sched_group *group, *sdg = sd->groups;
  2768. unsigned long power;
  2769. if (!child) {
  2770. update_cpu_power(sd, cpu);
  2771. return;
  2772. }
  2773. power = 0;
  2774. group = child->groups;
  2775. do {
  2776. power += group->sgp->power;
  2777. group = group->next;
  2778. } while (group != child->groups);
  2779. sdg->sgp->power = power;
  2780. }
  2781. /*
2782. * Try and fix up capacity for tiny siblings; this is needed when
  2783. * things like SD_ASYM_PACKING need f_b_g to select another sibling
  2784. * which on its own isn't powerful enough.
  2785. *
  2786. * See update_sd_pick_busiest() and check_asym_packing().
  2787. */
  2788. static inline int
  2789. fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  2790. {
  2791. /*
  2792. * Only siblings can have significantly less than SCHED_POWER_SCALE
  2793. */
  2794. if (!(sd->flags & SD_SHARE_CPUPOWER))
  2795. return 0;
  2796. /*
  2797. * If ~90% of the cpu_power is still there, we're good.
  2798. */
  2799. if (group->sgp->power * 32 > group->sgp->power_orig * 29)
  2800. return 1;
  2801. return 0;
  2802. }
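/*
 * Editorial worked example: the 32/29 comparison is a fixed-point way
 * of asking whether power >= ~90.6% of power_orig. For an SMT sibling
 * with power_orig = 589 (value assumed), the group keeps a capacity of
 * 1 as long as its current power exceeds 589 * 29 / 32 ~= 533; below
 * that, RT or frequency pressure has eaten enough of the sibling that
 * it is no longer counted as a full unit of capacity.
 */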
  2803. /**
  2804. * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  2805. * @sd: The sched_domain whose statistics are to be updated.
  2806. * @group: sched_group whose statistics are to be updated.
  2807. * @this_cpu: Cpu for which load balance is currently performed.
  2808. * @idle: Idle status of this_cpu
  2809. * @load_idx: Load index of sched_domain of this_cpu for load calc.
  2810. * @local_group: Does group contain this_cpu.
  2811. * @cpus: Set of cpus considered for load balancing.
  2812. * @balance: Should we balance.
  2813. * @sgs: variable to hold the statistics for this group.
  2814. */
  2815. static inline void update_sg_lb_stats(struct sched_domain *sd,
  2816. struct sched_group *group, int this_cpu,
  2817. enum cpu_idle_type idle, int load_idx,
  2818. int local_group, const struct cpumask *cpus,
  2819. int *balance, struct sg_lb_stats *sgs)
  2820. {
  2821. unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
  2822. int i;
  2823. unsigned int balance_cpu = -1, first_idle_cpu = 0;
  2824. unsigned long avg_load_per_task = 0;
  2825. if (local_group)
  2826. balance_cpu = group_first_cpu(group);
  2827. /* Tally up the load of all CPUs in the group */
  2828. max_cpu_load = 0;
  2829. min_cpu_load = ~0UL;
  2830. max_nr_running = 0;
  2831. for_each_cpu_and(i, sched_group_cpus(group), cpus) {
  2832. struct rq *rq = cpu_rq(i);
  2833. /* Bias balancing toward cpus of our domain */
  2834. if (local_group) {
  2835. if (idle_cpu(i) && !first_idle_cpu) {
  2836. first_idle_cpu = 1;
  2837. balance_cpu = i;
  2838. }
  2839. load = target_load(i, load_idx);
  2840. } else {
  2841. load = source_load(i, load_idx);
  2842. if (load > max_cpu_load) {
  2843. max_cpu_load = load;
  2844. max_nr_running = rq->nr_running;
  2845. }
  2846. if (min_cpu_load > load)
  2847. min_cpu_load = load;
  2848. }
  2849. sgs->group_load += load;
  2850. sgs->sum_nr_running += rq->nr_running;
  2851. sgs->sum_weighted_load += weighted_cpuload(i);
  2852. if (idle_cpu(i))
  2853. sgs->idle_cpus++;
  2854. }
  2855. /*
2856. * The first idle cpu or the first cpu (busiest) in this sched group
2857. * is eligible for doing load balancing at this and above
2858. * domains. In the newly idle case, we will allow all the cpus
2859. * to do the newly idle load balance.
  2860. */
  2861. if (idle != CPU_NEWLY_IDLE && local_group) {
  2862. if (balance_cpu != this_cpu) {
  2863. *balance = 0;
  2864. return;
  2865. }
  2866. update_group_power(sd, this_cpu);
  2867. }
  2868. /* Adjust by relative CPU power of the group */
  2869. sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
  2870. /*
  2871. * Consider the group unbalanced when the imbalance is larger
  2872. * than the average weight of a task.
  2873. *
  2874. * APZ: with cgroup the avg task weight can vary wildly and
  2875. * might not be a suitable number - should we keep a
  2876. * normalized nr_running number somewhere that negates
  2877. * the hierarchy?
  2878. */
  2879. if (sgs->sum_nr_running)
  2880. avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
  2881. if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
  2882. sgs->group_imb = 1;
  2883. sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
  2884. SCHED_POWER_SCALE);
  2885. if (!sgs->group_capacity)
  2886. sgs->group_capacity = fix_small_capacity(sd, group);
  2887. sgs->group_weight = group->group_weight;
  2888. if (sgs->group_capacity > sgs->sum_nr_running)
  2889. sgs->group_has_capacity = 1;
  2890. }
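/*
 * Editorial note on the capacity computed above: group_capacity is the
 * group's total cpu_power expressed in whole "task slots" of
 * SCHED_POWER_SCALE each, rounded to nearest. Two SMT siblings at 589
 * each give 1178 in total, so DIV_ROUND_CLOSEST(1178, 1024) = 1: the
 * pair counts as one unit of capacity, which is what later lets the
 * balancer treat a 2-thread core running 2 tasks as overloaded.
 */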
  2891. /**
  2892. * update_sd_pick_busiest - return 1 on busiest group
  2893. * @sd: sched_domain whose statistics are to be checked
  2894. * @sds: sched_domain statistics
  2895. * @sg: sched_group candidate to be checked for being the busiest
  2896. * @sgs: sched_group statistics
  2897. * @this_cpu: the current cpu
  2898. *
  2899. * Determine if @sg is a busier group than the previously selected
  2900. * busiest group.
  2901. */
  2902. static bool update_sd_pick_busiest(struct sched_domain *sd,
  2903. struct sd_lb_stats *sds,
  2904. struct sched_group *sg,
  2905. struct sg_lb_stats *sgs,
  2906. int this_cpu)
  2907. {
  2908. if (sgs->avg_load <= sds->max_load)
  2909. return false;
  2910. if (sgs->sum_nr_running > sgs->group_capacity)
  2911. return true;
  2912. if (sgs->group_imb)
  2913. return true;
  2914. /*
  2915. * ASYM_PACKING needs to move all the work to the lowest
  2916. * numbered CPUs in the group, therefore mark all groups
2917. * higher than ourselves as busy.
  2918. */
  2919. if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
  2920. this_cpu < group_first_cpu(sg)) {
  2921. if (!sds->busiest)
  2922. return true;
  2923. if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
  2924. return true;
  2925. }
  2926. return false;
  2927. }
  2928. /**
  2929. * update_sd_lb_stats - Update sched_group's statistics for load balancing.
  2930. * @sd: sched_domain whose statistics are to be updated.
  2931. * @this_cpu: Cpu for which load balance is currently performed.
  2932. * @idle: Idle status of this_cpu
  2933. * @cpus: Set of cpus considered for load balancing.
  2934. * @balance: Should we balance.
  2935. * @sds: variable to hold the statistics for this sched_domain.
  2936. */
  2937. static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
  2938. enum cpu_idle_type idle, const struct cpumask *cpus,
  2939. int *balance, struct sd_lb_stats *sds)
  2940. {
  2941. struct sched_domain *child = sd->child;
  2942. struct sched_group *sg = sd->groups;
  2943. struct sg_lb_stats sgs;
  2944. int load_idx, prefer_sibling = 0;
  2945. if (child && child->flags & SD_PREFER_SIBLING)
  2946. prefer_sibling = 1;
  2947. init_sd_power_savings_stats(sd, sds, idle);
  2948. load_idx = get_sd_load_idx(sd, idle);
  2949. do {
  2950. int local_group;
  2951. local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
  2952. memset(&sgs, 0, sizeof(sgs));
  2953. update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
  2954. local_group, cpus, balance, &sgs);
  2955. if (local_group && !(*balance))
  2956. return;
  2957. sds->total_load += sgs.group_load;
  2958. sds->total_pwr += sg->sgp->power;
  2959. /*
  2960. * In case the child domain prefers tasks go to siblings
  2961. * first, lower the sg capacity to one so that we'll try
  2962. * and move all the excess tasks away. We lower the capacity
  2963. * of a group only if the local group has the capacity to fit
  2964. * these excess tasks, i.e. nr_running < group_capacity. The
  2965. * extra check prevents the case where you always pull from the
  2966. * heaviest group when it is already under-utilized (possible
2967. * when a large-weight task outweighs the other tasks on the system).
  2968. */
  2969. if (prefer_sibling && !local_group && sds->this_has_capacity)
  2970. sgs.group_capacity = min(sgs.group_capacity, 1UL);
  2971. if (local_group) {
  2972. sds->this_load = sgs.avg_load;
  2973. sds->this = sg;
  2974. sds->this_nr_running = sgs.sum_nr_running;
  2975. sds->this_load_per_task = sgs.sum_weighted_load;
  2976. sds->this_has_capacity = sgs.group_has_capacity;
  2977. sds->this_idle_cpus = sgs.idle_cpus;
  2978. } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
  2979. sds->max_load = sgs.avg_load;
  2980. sds->busiest = sg;
  2981. sds->busiest_nr_running = sgs.sum_nr_running;
  2982. sds->busiest_idle_cpus = sgs.idle_cpus;
  2983. sds->busiest_group_capacity = sgs.group_capacity;
  2984. sds->busiest_load_per_task = sgs.sum_weighted_load;
  2985. sds->busiest_has_capacity = sgs.group_has_capacity;
  2986. sds->busiest_group_weight = sgs.group_weight;
  2987. sds->group_imb = sgs.group_imb;
  2988. }
  2989. update_sd_power_savings_stats(sg, sds, local_group, &sgs);
  2990. sg = sg->next;
  2991. } while (sg != sd->groups);
  2992. }
  2993. int __weak arch_sd_sibling_asym_packing(void)
  2994. {
  2995. return 0*SD_ASYM_PACKING;
  2996. }
  2997. /**
  2998. * check_asym_packing - Check to see if the group is packed into the
2999. * sched domain.
  3000. *
3001. * This is primarily intended to be used at the sibling level. Some
  3002. * cores like POWER7 prefer to use lower numbered SMT threads. In the
  3003. * case of POWER7, it can move to lower SMT modes only when higher
  3004. * threads are idle. When in lower SMT modes, the threads will
  3005. * perform better since they share less core resources. Hence when we
  3006. * have idle threads, we want them to be the higher ones.
  3007. *
  3008. * This packing function is run on idle threads. It checks to see if
  3009. * the busiest CPU in this domain (core in the P7 case) has a higher
  3010. * CPU number than the packing function is being run on. Here we are
3011. * assuming a lower CPU number will be equivalent to a lower SMT thread
  3012. * number.
  3013. *
  3014. * Returns 1 when packing is required and a task should be moved to
  3015. * this CPU. The amount of the imbalance is returned in *imbalance.
  3016. *
  3017. * @sd: The sched_domain whose packing is to be checked.
  3018. * @sds: Statistics of the sched_domain which is to be packed
  3019. * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3020. * @imbalance: returns the amount of imbalance due to packing.
  3021. */
  3022. static int check_asym_packing(struct sched_domain *sd,
  3023. struct sd_lb_stats *sds,
  3024. int this_cpu, unsigned long *imbalance)
  3025. {
  3026. int busiest_cpu;
  3027. if (!(sd->flags & SD_ASYM_PACKING))
  3028. return 0;
  3029. if (!sds->busiest)
  3030. return 0;
  3031. busiest_cpu = group_first_cpu(sds->busiest);
  3032. if (this_cpu > busiest_cpu)
  3033. return 0;
  3034. *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
  3035. SCHED_POWER_SCALE);
  3036. return 1;
  3037. }
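/*
 * Editorial worked example: max_load is the busiest group's load
 * scaled by SCHED_POWER_SCALE / group power, so multiplying by the
 * group power and dividing by SCHED_POWER_SCALE simply converts it
 * back to plain weighted-load units. E.g. group_load = 2048 on a
 * group with power 512 gives max_load = 4096, and the reported
 * packing imbalance is 4096 * 512 / 1024 = 2048: move everything.
 */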
  3038. /**
  3039. * fix_small_imbalance - Calculate the minor imbalance that exists
  3040. * amongst the groups of a sched_domain, during
  3041. * load balancing.
  3042. * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
  3043. * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
  3044. * @imbalance: Variable to store the imbalance.
  3045. */
  3046. static inline void fix_small_imbalance(struct sd_lb_stats *sds,
  3047. int this_cpu, unsigned long *imbalance)
  3048. {
  3049. unsigned long tmp, pwr_now = 0, pwr_move = 0;
  3050. unsigned int imbn = 2;
  3051. unsigned long scaled_busy_load_per_task;
  3052. if (sds->this_nr_running) {
  3053. sds->this_load_per_task /= sds->this_nr_running;
  3054. if (sds->busiest_load_per_task >
  3055. sds->this_load_per_task)
  3056. imbn = 1;
  3057. } else
  3058. sds->this_load_per_task =
  3059. cpu_avg_load_per_task(this_cpu);
  3060. scaled_busy_load_per_task = sds->busiest_load_per_task
  3061. * SCHED_POWER_SCALE;
  3062. scaled_busy_load_per_task /= sds->busiest->sgp->power;
  3063. if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
  3064. (scaled_busy_load_per_task * imbn)) {
  3065. *imbalance = sds->busiest_load_per_task;
  3066. return;
  3067. }
  3068. /*
  3069. * OK, we don't have enough imbalance to justify moving tasks,
  3070. * however we may be able to increase total CPU power used by
  3071. * moving them.
  3072. */
  3073. pwr_now += sds->busiest->sgp->power *
  3074. min(sds->busiest_load_per_task, sds->max_load);
  3075. pwr_now += sds->this->sgp->power *
  3076. min(sds->this_load_per_task, sds->this_load);
  3077. pwr_now /= SCHED_POWER_SCALE;
  3078. /* Amount of load we'd subtract */
  3079. tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
  3080. sds->busiest->sgp->power;
  3081. if (sds->max_load > tmp)
  3082. pwr_move += sds->busiest->sgp->power *
  3083. min(sds->busiest_load_per_task, sds->max_load - tmp);
  3084. /* Amount of load we'd add */
  3085. if (sds->max_load * sds->busiest->sgp->power <
  3086. sds->busiest_load_per_task * SCHED_POWER_SCALE)
  3087. tmp = (sds->max_load * sds->busiest->sgp->power) /
  3088. sds->this->sgp->power;
  3089. else
  3090. tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
  3091. sds->this->sgp->power;
  3092. pwr_move += sds->this->sgp->power *
  3093. min(sds->this_load_per_task, sds->this_load + tmp);
  3094. pwr_move /= SCHED_POWER_SCALE;
  3095. /* Move if we gain throughput */
  3096. if (pwr_move > pwr_now)
  3097. *imbalance = sds->busiest_load_per_task;
  3098. }
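/*
 * Editorial note on the block above: pwr_now estimates the compute
 * throughput currently extracted from the busiest and local groups,
 * and pwr_move estimates the same quantity after hypothetically moving
 * one busiest_load_per_task chunk from the busiest group to the local
 * one (tmp is that chunk expressed in each group's own power-scaled
 * units). A one-task imbalance is reported only when the after picture
 * strictly beats the before picture; otherwise *imbalance is left at 0
 * and no task is forced across.
 */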
  3099. /**
  3100. * calculate_imbalance - Calculate the amount of imbalance present within the
  3101. * groups of a given sched_domain during load balance.
  3102. * @sds: statistics of the sched_domain whose imbalance is to be calculated.
  3103. * @this_cpu: Cpu for which currently load balance is being performed.
  3104. * @imbalance: The variable to store the imbalance.
  3105. */
  3106. static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
  3107. unsigned long *imbalance)
  3108. {
  3109. unsigned long max_pull, load_above_capacity = ~0UL;
  3110. sds->busiest_load_per_task /= sds->busiest_nr_running;
  3111. if (sds->group_imb) {
  3112. sds->busiest_load_per_task =
  3113. min(sds->busiest_load_per_task, sds->avg_load);
  3114. }
  3115. /*
  3116. * In the presence of smp nice balancing, certain scenarios can have
3117. * max load less than avg load (as we skip the groups at or below
3118. * its cpu_power while calculating max_load...)
  3119. */
  3120. if (sds->max_load < sds->avg_load) {
  3121. *imbalance = 0;
  3122. return fix_small_imbalance(sds, this_cpu, imbalance);
  3123. }
  3124. if (!sds->group_imb) {
  3125. /*
  3126. * Don't want to pull so many tasks that a group would go idle.
  3127. */
  3128. load_above_capacity = (sds->busiest_nr_running -
  3129. sds->busiest_group_capacity);
  3130. load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
  3131. load_above_capacity /= sds->busiest->sgp->power;
  3132. }
  3133. /*
  3134. * We're trying to get all the cpus to the average_load, so we don't
  3135. * want to push ourselves above the average load, nor do we wish to
  3136. * reduce the max loaded cpu below the average load. At the same time,
  3137. * we also don't want to reduce the group load below the group capacity
  3138. * (so that we can implement power-savings policies etc). Thus we look
  3139. * for the minimum possible imbalance.
  3140. * Be careful of negative numbers as they'll appear as very large values
  3141. * with unsigned longs.
  3142. */
  3143. max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
  3144. /* How much load to actually move to equalise the imbalance */
  3145. *imbalance = min(max_pull * sds->busiest->sgp->power,
  3146. (sds->avg_load - sds->this_load) * sds->this->sgp->power)
  3147. / SCHED_POWER_SCALE;
  3148. /*
  3149. * if *imbalance is less than the average load per runnable task
  3150. * there is no guarantee that any tasks will be moved so we'll have
  3151. * a think about bumping its value to force at least one task to be
  3152. * moved
  3153. */
  3154. if (*imbalance < sds->busiest_load_per_task)
  3155. return fix_small_imbalance(sds, this_cpu, imbalance);
  3156. }
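/*
 * Editorial worked example of the formula above (values assumed): with
 * avg_load = 1500, max_load = 2000, this_load = 1000 and both groups
 * at power 1024, and ignoring the load_above_capacity clamp,
 * max_pull = 2000 - 1500 = 500 and
 * *imbalance = min(500 * 1024, 500 * 1024) / 1024 = 500: pull just
 * enough weighted load for both sides to meet at the domain average,
 * never past it from either direction.
 */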
  3157. /******* find_busiest_group() helpers end here *********************/
  3158. /**
  3159. * find_busiest_group - Returns the busiest group within the sched_domain
  3160. * if there is an imbalance. If there isn't an imbalance, and
  3161. * the user has opted for power-savings, it returns a group whose
  3162. * CPUs can be put to idle by rebalancing those tasks elsewhere, if
  3163. * such a group exists.
  3164. *
  3165. * Also calculates the amount of weighted load which should be moved
  3166. * to restore balance.
  3167. *
  3168. * @sd: The sched_domain whose busiest group is to be returned.
  3169. * @this_cpu: The cpu for which load balancing is currently being performed.
  3170. * @imbalance: Variable which stores amount of weighted load which should
  3171. * be moved to restore balance/put a group to idle.
  3172. * @idle: The idle status of this_cpu.
  3173. * @cpus: The set of CPUs under consideration for load-balancing.
  3174. * @balance: Pointer to a variable indicating if this_cpu
  3175. * is the appropriate cpu to perform load balancing at this_level.
  3176. *
  3177. * Returns: - the busiest group if imbalance exists.
  3178. * - If no imbalance and user has opted for power-savings balance,
  3179. * return the least loaded group whose CPUs can be
  3180. * put to idle by rebalancing its tasks onto our group.
  3181. */
  3182. static struct sched_group *
  3183. find_busiest_group(struct sched_domain *sd, int this_cpu,
  3184. unsigned long *imbalance, enum cpu_idle_type idle,
  3185. const struct cpumask *cpus, int *balance)
  3186. {
  3187. struct sd_lb_stats sds;
  3188. memset(&sds, 0, sizeof(sds));
  3189. /*
3190. * Compute the various statistics relevant for load balancing at
  3191. * this level.
  3192. */
  3193. update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
  3194. /*
  3195. * this_cpu is not the appropriate cpu to perform load balancing at
  3196. * this level.
  3197. */
  3198. if (!(*balance))
  3199. goto ret;
  3200. if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
  3201. check_asym_packing(sd, &sds, this_cpu, imbalance))
  3202. return sds.busiest;
  3203. /* There is no busy sibling group to pull tasks from */
  3204. if (!sds.busiest || sds.busiest_nr_running == 0)
  3205. goto out_balanced;
  3206. sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
  3207. /*
3208. * If the busiest group is imbalanced, the below checks don't
3209. * work because they assume all things are equal, which typically
  3210. * isn't true due to cpus_allowed constraints and the like.
  3211. */
  3212. if (sds.group_imb)
  3213. goto force_balance;
  3214. /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
  3215. if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
  3216. !sds.busiest_has_capacity)
  3217. goto force_balance;
  3218. /*
  3219. * If the local group is more busy than the selected busiest group
  3220. * don't try and pull any tasks.
  3221. */
  3222. if (sds.this_load >= sds.max_load)
  3223. goto out_balanced;
  3224. /*
  3225. * Don't pull any tasks if this group is already above the domain
  3226. * average load.
  3227. */
  3228. if (sds.this_load >= sds.avg_load)
  3229. goto out_balanced;
  3230. if (idle == CPU_IDLE) {
  3231. /*
3232. * This cpu is idle. If the busiest group doesn't
3233. * have more tasks than the number of available cpus and
3234. * there is no imbalance between this and the busiest group
3235. * wrt idle cpus, it is balanced.
  3236. */
  3237. if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
  3238. sds.busiest_nr_running <= sds.busiest_group_weight)
  3239. goto out_balanced;
  3240. } else {
  3241. /*
  3242. * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
  3243. * imbalance_pct to be conservative.
  3244. */
  3245. if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
  3246. goto out_balanced;
  3247. }
  3248. force_balance:
  3249. /* Looks like there is an imbalance. Compute it */
  3250. calculate_imbalance(&sds, this_cpu, imbalance);
  3251. return sds.busiest;
  3252. out_balanced:
  3253. /*
  3254. * There is no obvious imbalance. But check if we can do some balancing
  3255. * to save power.
  3256. */
  3257. if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
  3258. return sds.busiest;
  3259. ret:
  3260. *imbalance = 0;
  3261. return NULL;
  3262. }
  3263. /*
  3264. * find_busiest_queue - find the busiest runqueue among the cpus in group.
  3265. */
  3266. static struct rq *
  3267. find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
  3268. enum cpu_idle_type idle, unsigned long imbalance,
  3269. const struct cpumask *cpus)
  3270. {
  3271. struct rq *busiest = NULL, *rq;
  3272. unsigned long max_load = 0;
  3273. int i;
  3274. for_each_cpu(i, sched_group_cpus(group)) {
  3275. unsigned long power = power_of(i);
  3276. unsigned long capacity = DIV_ROUND_CLOSEST(power,
  3277. SCHED_POWER_SCALE);
  3278. unsigned long wl;
  3279. if (!capacity)
  3280. capacity = fix_small_capacity(sd, group);
  3281. if (!cpumask_test_cpu(i, cpus))
  3282. continue;
  3283. rq = cpu_rq(i);
  3284. wl = weighted_cpuload(i);
  3285. /*
  3286. * When comparing with imbalance, use weighted_cpuload()
  3287. * which is not scaled with the cpu power.
  3288. */
  3289. if (capacity && rq->nr_running == 1 && wl > imbalance)
  3290. continue;
  3291. /*
3292. * For the load comparisons with the other cpus, consider
  3293. * the weighted_cpuload() scaled with the cpu power, so that
  3294. * the load can be moved away from the cpu that is potentially
  3295. * running at a lower capacity.
  3296. */
  3297. wl = (wl * SCHED_POWER_SCALE) / power;
  3298. if (wl > max_load) {
  3299. max_load = wl;
  3300. busiest = rq;
  3301. }
  3302. }
  3303. return busiest;
  3304. }
  3305. /*
3306. * Max backoff if we encounter pinned tasks. Pretty arbitrary value,
3307. * but any value works so long as it is large enough.
  3308. */
  3309. #define MAX_PINNED_INTERVAL 512
  3310. /* Working cpumask for load_balance and load_balance_newidle. */
  3311. static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
  3312. static int need_active_balance(struct sched_domain *sd, int idle,
  3313. int busiest_cpu, int this_cpu)
  3314. {
  3315. if (idle == CPU_NEWLY_IDLE) {
  3316. /*
  3317. * ASYM_PACKING needs to force migrate tasks from busy but
  3318. * higher numbered CPUs in order to pack all tasks in the
  3319. * lowest numbered CPUs.
  3320. */
  3321. if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
  3322. return 1;
  3323. /*
  3324. * The only task running in a non-idle cpu can be moved to this
3325. * cpu in an attempt to completely free up the other CPU
  3326. * package.
  3327. *
  3328. * The package power saving logic comes from
3329. * find_busiest_group(). If there is no imbalance, then
  3330. * f_b_g() will return NULL. However when sched_mc={1,2} then
  3331. * f_b_g() will select a group from which a running task may be
  3332. * pulled to this cpu in order to make the other package idle.
  3333. * If there is no opportunity to make a package idle and if
3334. * there is no imbalance, then f_b_g() will return NULL and no
  3335. * action will be taken in load_balance_newidle().
  3336. *
  3337. * Under normal task pull operation due to imbalance, there
  3338. * will be more than one task in the source run queue and
  3339. * move_tasks() will succeed. ld_moved will be true and this
  3340. * active balance code will not be triggered.
  3341. */
  3342. if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
  3343. return 0;
  3344. }
  3345. return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
  3346. }
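/*
 * Editorial note: the fallthrough return above is the generic
 * escalation rule. sd->nr_balance_failed counts consecutive periodic
 * balance attempts that moved nothing (newly idle attempts are
 * excluded by the caller), and once it exceeds cache_nice_tries + 2
 * the balancer gives up on cache-friendly migration and kicks the cpu
 * stopper to forcibly push the running task. Assuming
 * cache_nice_tries = 1 (a typical value at lower domain levels), that
 * means active balance after the fourth consecutive failure.
 */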
  3347. static int active_load_balance_cpu_stop(void *data);
  3348. /*
  3349. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  3350. * tasks if there is an imbalance.
  3351. */
  3352. static int load_balance(int this_cpu, struct rq *this_rq,
  3353. struct sched_domain *sd, enum cpu_idle_type idle,
  3354. int *balance)
  3355. {
  3356. int ld_moved, all_pinned = 0, active_balance = 0;
  3357. struct sched_group *group;
  3358. unsigned long imbalance;
  3359. struct rq *busiest;
  3360. unsigned long flags;
  3361. struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  3362. cpumask_copy(cpus, cpu_active_mask);
  3363. schedstat_inc(sd, lb_count[idle]);
  3364. redo:
  3365. group = find_busiest_group(sd, this_cpu, &imbalance, idle,
  3366. cpus, balance);
  3367. if (*balance == 0)
  3368. goto out_balanced;
  3369. if (!group) {
  3370. schedstat_inc(sd, lb_nobusyg[idle]);
  3371. goto out_balanced;
  3372. }
  3373. busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
  3374. if (!busiest) {
  3375. schedstat_inc(sd, lb_nobusyq[idle]);
  3376. goto out_balanced;
  3377. }
  3378. BUG_ON(busiest == this_rq);
  3379. schedstat_add(sd, lb_imbalance[idle], imbalance);
  3380. ld_moved = 0;
  3381. if (busiest->nr_running > 1) {
  3382. /*
  3383. * Attempt to move tasks. If find_busiest_group has found
  3384. * an imbalance but busiest->nr_running <= 1, the group is
  3385. * still unbalanced. ld_moved simply stays zero, so it is
  3386. * correctly treated as an imbalance.
  3387. */
  3388. all_pinned = 1;
  3389. local_irq_save(flags);
  3390. double_rq_lock(this_rq, busiest);
  3391. ld_moved = move_tasks(this_rq, this_cpu, busiest,
  3392. imbalance, sd, idle, &all_pinned);
  3393. double_rq_unlock(this_rq, busiest);
  3394. local_irq_restore(flags);
  3395. /*
  3396. * some other cpu did the load balance for us.
  3397. */
  3398. if (ld_moved && this_cpu != smp_processor_id())
  3399. resched_cpu(this_cpu);
  3400. /* All tasks on this runqueue were pinned by CPU affinity */
  3401. if (unlikely(all_pinned)) {
  3402. cpumask_clear_cpu(cpu_of(busiest), cpus);
  3403. if (!cpumask_empty(cpus))
  3404. goto redo;
  3405. goto out_balanced;
  3406. }
  3407. }
  3408. if (!ld_moved) {
  3409. schedstat_inc(sd, lb_failed[idle]);
  3410. /*
  3411. * Increment the failure counter only on periodic balance.
  3412. * We do not want newidle balance, which can be very
  3413. * frequent, pollute the failure counter causing
  3414. * excessive cache_hot migrations and active balances.
  3415. */
  3416. if (idle != CPU_NEWLY_IDLE)
  3417. sd->nr_balance_failed++;
  3418. if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
  3419. raw_spin_lock_irqsave(&busiest->lock, flags);
  3420. /* don't kick the active_load_balance_cpu_stop,
  3421. * if the curr task on busiest cpu can't be
  3422. * moved to this_cpu
  3423. */
  3424. if (!cpumask_test_cpu(this_cpu,
  3425. &busiest->curr->cpus_allowed)) {
  3426. raw_spin_unlock_irqrestore(&busiest->lock,
  3427. flags);
  3428. all_pinned = 1;
  3429. goto out_one_pinned;
  3430. }
  3431. /*
  3432. * ->active_balance synchronizes accesses to
  3433. * ->active_balance_work. Once set, it's cleared
  3434. * only after active load balance is finished.
  3435. */
  3436. if (!busiest->active_balance) {
  3437. busiest->active_balance = 1;
  3438. busiest->push_cpu = this_cpu;
  3439. active_balance = 1;
  3440. }
  3441. raw_spin_unlock_irqrestore(&busiest->lock, flags);
  3442. if (active_balance)
  3443. stop_one_cpu_nowait(cpu_of(busiest),
  3444. active_load_balance_cpu_stop, busiest,
  3445. &busiest->active_balance_work);
  3446. /*
  3447. * We've kicked active balancing, reset the failure
  3448. * counter.
  3449. */
  3450. sd->nr_balance_failed = sd->cache_nice_tries+1;
  3451. }
  3452. } else
  3453. sd->nr_balance_failed = 0;
  3454. if (likely(!active_balance)) {
  3455. /* We were unbalanced, so reset the balancing interval */
  3456. sd->balance_interval = sd->min_interval;
  3457. } else {
  3458. /*
  3459. * If we've begun active balancing, start to back off. This
  3460. * case may not be covered by the all_pinned logic if there
  3461. * is only 1 task on the busy runqueue (because we don't call
  3462. * move_tasks).
  3463. */
  3464. if (sd->balance_interval < sd->max_interval)
  3465. sd->balance_interval *= 2;
  3466. }
  3467. goto out;
  3468. out_balanced:
  3469. schedstat_inc(sd, lb_balanced[idle]);
  3470. sd->nr_balance_failed = 0;
  3471. out_one_pinned:
  3472. /* tune up the balancing interval */
  3473. if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
  3474. (sd->balance_interval < sd->max_interval))
  3475. sd->balance_interval *= 2;
  3476. ld_moved = 0;
  3477. out:
  3478. return ld_moved;
  3479. }
  3480. /*
  3481. * idle_balance is called by schedule() if this_cpu is about to become
  3482. * idle. Attempts to pull tasks from other CPUs.
  3483. */
  3484. static void idle_balance(int this_cpu, struct rq *this_rq)
  3485. {
  3486. struct sched_domain *sd;
  3487. int pulled_task = 0;
  3488. unsigned long next_balance = jiffies + HZ;
  3489. this_rq->idle_stamp = this_rq->clock;
  3490. if (this_rq->avg_idle < sysctl_sched_migration_cost)
  3491. return;
  3492. /*
  3493. * Drop the rq->lock, but keep IRQ/preempt disabled.
  3494. */
  3495. raw_spin_unlock(&this_rq->lock);
  3496. update_shares(this_cpu);
  3497. rcu_read_lock();
  3498. for_each_domain(this_cpu, sd) {
  3499. unsigned long interval;
  3500. int balance = 1;
  3501. if (!(sd->flags & SD_LOAD_BALANCE))
  3502. continue;
  3503. if (sd->flags & SD_BALANCE_NEWIDLE) {
  3504. /* If we've pulled tasks over stop searching: */
  3505. pulled_task = load_balance(this_cpu, this_rq,
  3506. sd, CPU_NEWLY_IDLE, &balance);
  3507. }
  3508. interval = msecs_to_jiffies(sd->balance_interval);
  3509. if (time_after(next_balance, sd->last_balance + interval))
  3510. next_balance = sd->last_balance + interval;
  3511. if (pulled_task) {
  3512. this_rq->idle_stamp = 0;
  3513. break;
  3514. }
  3515. }
  3516. rcu_read_unlock();
  3517. raw_spin_lock(&this_rq->lock);
  3518. if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
  3519. /*
  3520. * We are going idle. next_balance may be set based on
  3521. * a busy processor. So reset next_balance.
  3522. */
  3523. this_rq->next_balance = next_balance;
  3524. }
  3525. }
  3526. /*
  3527. * active_load_balance_cpu_stop is run by cpu stopper. It pushes
  3528. * running tasks off the busiest CPU onto idle CPUs. It requires at
  3529. * least 1 task to be running on each physical CPU where possible, and
  3530. * avoids physical / logical imbalances.
  3531. */
  3532. static int active_load_balance_cpu_stop(void *data)
  3533. {
  3534. struct rq *busiest_rq = data;
  3535. int busiest_cpu = cpu_of(busiest_rq);
  3536. int target_cpu = busiest_rq->push_cpu;
  3537. struct rq *target_rq = cpu_rq(target_cpu);
  3538. struct sched_domain *sd;
  3539. raw_spin_lock_irq(&busiest_rq->lock);
  3540. /* make sure the requested cpu hasn't gone down in the meantime */
  3541. if (unlikely(busiest_cpu != smp_processor_id() ||
  3542. !busiest_rq->active_balance))
  3543. goto out_unlock;
  3544. /* Is there any task to move? */
  3545. if (busiest_rq->nr_running <= 1)
  3546. goto out_unlock;
  3547. /*
3548. * This condition is "impossible"; if it occurs
  3549. * we need to fix it. Originally reported by
  3550. * Bjorn Helgaas on a 128-cpu setup.
  3551. */
  3552. BUG_ON(busiest_rq == target_rq);
  3553. /* move a task from busiest_rq to target_rq */
  3554. double_lock_balance(busiest_rq, target_rq);
  3555. /* Search for an sd spanning us and the target CPU. */
  3556. rcu_read_lock();
  3557. for_each_domain(target_cpu, sd) {
  3558. if ((sd->flags & SD_LOAD_BALANCE) &&
  3559. cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
  3560. break;
  3561. }
  3562. if (likely(sd)) {
  3563. schedstat_inc(sd, alb_count);
  3564. if (move_one_task(target_rq, target_cpu, busiest_rq,
  3565. sd, CPU_IDLE))
  3566. schedstat_inc(sd, alb_pushed);
  3567. else
  3568. schedstat_inc(sd, alb_failed);
  3569. }
  3570. rcu_read_unlock();
  3571. double_unlock_balance(busiest_rq, target_rq);
  3572. out_unlock:
  3573. busiest_rq->active_balance = 0;
  3574. raw_spin_unlock_irq(&busiest_rq->lock);
  3575. return 0;
  3576. }
  3577. #ifdef CONFIG_NO_HZ
  3578. static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
  3579. static void trigger_sched_softirq(void *data)
  3580. {
  3581. raise_softirq_irqoff(SCHED_SOFTIRQ);
  3582. }
  3583. static inline void init_sched_softirq_csd(struct call_single_data *csd)
  3584. {
  3585. csd->func = trigger_sched_softirq;
  3586. csd->info = NULL;
  3587. csd->flags = 0;
  3588. csd->priv = 0;
  3589. }
  3590. /*
  3591. * idle load balancing details
  3592. * - One of the idle CPUs nominates itself as idle load_balancer, while
  3593. * entering idle.
  3594. * - This idle load balancer CPU will also go into tickless mode when
  3595. * it is idle, just like all other idle CPUs
3596. * - When one of the busy CPUs notices that idle rebalancing may be
3597. * needed, it will kick the idle load balancer, which then does idle
  3598. * load balancing for all the idle CPUs.
  3599. */
  3600. static struct {
  3601. atomic_t load_balancer;
  3602. atomic_t first_pick_cpu;
  3603. atomic_t second_pick_cpu;
  3604. cpumask_var_t idle_cpus_mask;
  3605. cpumask_var_t grp_idle_mask;
  3606. unsigned long next_balance; /* in jiffy units */
  3607. } nohz ____cacheline_aligned;
  3608. int get_nohz_load_balancer(void)
  3609. {
  3610. return atomic_read(&nohz.load_balancer);
  3611. }
  3612. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  3613. /**
  3614. * lowest_flag_domain - Return lowest sched_domain containing flag.
  3615. * @cpu: The cpu whose lowest level of sched domain is to
  3616. * be returned.
  3617. * @flag: The flag to check for the lowest sched_domain
  3618. * for the given cpu.
  3619. *
  3620. * Returns the lowest sched_domain of a cpu which contains the given flag.
  3621. */
  3622. static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
  3623. {
  3624. struct sched_domain *sd;
  3625. for_each_domain(cpu, sd)
  3626. if (sd->flags & flag)
  3627. break;
  3628. return sd;
  3629. }
  3630. /**
  3631. * for_each_flag_domain - Iterates over sched_domains containing the flag.
  3632. * @cpu: The cpu whose domains we're iterating over.
  3633. * @sd: variable holding the value of the power_savings_sd
  3634. * for cpu.
  3635. * @flag: The flag to filter the sched_domains to be iterated.
  3636. *
  3637. * Iterates over all the scheduler domains for a given cpu that has the 'flag'
  3638. * set, starting from the lowest sched_domain to the highest.
  3639. */
  3640. #define for_each_flag_domain(cpu, sd, flag) \
  3641. for (sd = lowest_flag_domain(cpu, flag); \
  3642. (sd && (sd->flags & flag)); sd = sd->parent)
  3643. /**
  3644. * is_semi_idle_group - Checks if the given sched_group is semi-idle.
  3645. * @ilb_group: group to be checked for semi-idleness
  3646. *
  3647. * Returns: 1 if the group is semi-idle. 0 otherwise.
  3648. *
3649. * We define a sched_group to be semi-idle if it has at least one idle CPU
3650. * and at least one non-idle CPU. This helper function checks if the given
  3651. * sched_group is semi-idle or not.
  3652. */
  3653. static inline int is_semi_idle_group(struct sched_group *ilb_group)
  3654. {
  3655. cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
  3656. sched_group_cpus(ilb_group));
  3657. /*
3658. * A sched_group is semi-idle when it has at least one busy cpu
3659. * and at least one idle cpu.
  3660. */
  3661. if (cpumask_empty(nohz.grp_idle_mask))
  3662. return 0;
  3663. if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
  3664. return 0;
  3665. return 1;
  3666. }
  3667. /**
  3668. * find_new_ilb - Finds the optimum idle load balancer for nomination.
  3669. * @cpu: The cpu which is nominating a new idle_load_balancer.
  3670. *
3671. * Returns: the id of the idle load balancer if it exists;
3672. * else, returns >= nr_cpu_ids.
  3673. *
  3674. * This algorithm picks the idle load balancer such that it belongs to a
  3675. * semi-idle powersavings sched_domain. The idea is to try and avoid
  3676. * completely idle packages/cores just for the purpose of idle load balancing
3677. * when there are other idle cpus which are better suited for that job.
  3678. */
  3679. static int find_new_ilb(int cpu)
  3680. {
  3681. struct sched_domain *sd;
  3682. struct sched_group *ilb_group;
  3683. int ilb = nr_cpu_ids;
  3684. /*
  3685. * Have idle load balancer selection from semi-idle packages only
  3686. * when power-aware load balancing is enabled
  3687. */
  3688. if (!(sched_smt_power_savings || sched_mc_power_savings))
  3689. goto out_done;
  3690. /*
  3691. * Optimize for the case when we have no idle CPUs or only one
  3692. * idle CPU. Don't walk the sched_domain hierarchy in such cases
  3693. */
  3694. if (cpumask_weight(nohz.idle_cpus_mask) < 2)
  3695. goto out_done;
  3696. rcu_read_lock();
  3697. for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
  3698. ilb_group = sd->groups;
  3699. do {
  3700. if (is_semi_idle_group(ilb_group)) {
  3701. ilb = cpumask_first(nohz.grp_idle_mask);
  3702. goto unlock;
  3703. }
  3704. ilb_group = ilb_group->next;
  3705. } while (ilb_group != sd->groups);
  3706. }
  3707. unlock:
  3708. rcu_read_unlock();
  3709. out_done:
  3710. return ilb;
  3711. }
  3712. #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
  3713. static inline int find_new_ilb(int call_cpu)
  3714. {
  3715. return nr_cpu_ids;
  3716. }
  3717. #endif
  3718. /*
  3719. * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3720. * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
  3721. * CPU (if there is one).
  3722. */
  3723. static void nohz_balancer_kick(int cpu)
  3724. {
  3725. int ilb_cpu;
  3726. nohz.next_balance++;
  3727. ilb_cpu = get_nohz_load_balancer();
  3728. if (ilb_cpu >= nr_cpu_ids) {
  3729. ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
  3730. if (ilb_cpu >= nr_cpu_ids)
  3731. return;
  3732. }
  3733. if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
  3734. struct call_single_data *cp;
  3735. cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
  3736. cp = &per_cpu(remote_sched_softirq_cb, cpu);
  3737. __smp_call_function_single(ilb_cpu, cp, 0);
  3738. }
  3739. return;
  3740. }
  3741. /*
  3742. * This routine will try to nominate the ilb (idle load balancing)
  3743. * owner among the cpus whose ticks are stopped. ilb owner will do the idle
  3744. * load balancing on behalf of all those cpus.
  3745. *
3746. * When the ilb owner becomes busy, we will not have a new ilb owner until some
  3747. * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
  3748. * idle load balancing by kicking one of the idle CPUs.
  3749. *
3750. * Ticks are stopped for the ilb owner as well, with a busy CPU kicking
3751. * this ilb owner CPU in the future (when there is a need for idle load
3752. * balancing on behalf of all idle CPUs).
  3753. */
  3754. void select_nohz_load_balancer(int stop_tick)
  3755. {
  3756. int cpu = smp_processor_id();
  3757. if (stop_tick) {
  3758. if (!cpu_active(cpu)) {
  3759. if (atomic_read(&nohz.load_balancer) != cpu)
  3760. return;
  3761. /*
  3762. * If we are going offline and still the leader,
  3763. * give up!
  3764. */
  3765. if (atomic_cmpxchg(&nohz.load_balancer, cpu,
  3766. nr_cpu_ids) != cpu)
  3767. BUG();
  3768. return;
  3769. }
  3770. cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
  3771. if (atomic_read(&nohz.first_pick_cpu) == cpu)
  3772. atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
  3773. if (atomic_read(&nohz.second_pick_cpu) == cpu)
  3774. atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
  3775. if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
  3776. int new_ilb;
  3777. /* make me the ilb owner */
  3778. if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
  3779. cpu) != nr_cpu_ids)
  3780. return;
  3781. /*
  3782. * Check to see if there is a more power-efficient
  3783. * ilb.
  3784. */
  3785. new_ilb = find_new_ilb(cpu);
  3786. if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
  3787. atomic_set(&nohz.load_balancer, nr_cpu_ids);
  3788. resched_cpu(new_ilb);
  3789. return;
  3790. }
  3791. return;
  3792. }
  3793. } else {
  3794. if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
  3795. return;
  3796. cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
  3797. if (atomic_read(&nohz.load_balancer) == cpu)
  3798. if (atomic_cmpxchg(&nohz.load_balancer, cpu,
  3799. nr_cpu_ids) != cpu)
  3800. BUG();
  3801. }
  3802. return;
  3803. }
  3804. #endif
  3805. static DEFINE_SPINLOCK(balancing);
  3806. static unsigned long __read_mostly max_load_balance_interval = HZ/10;
  3807. /*
  3808. * Scale the max load_balance interval with the number of CPUs in the system.
  3809. * This trades load-balance latency on larger machines for less cross talk.
  3810. */
  3811. static void update_max_interval(void)
  3812. {
  3813. max_load_balance_interval = HZ*num_online_cpus()/10;
  3814. }
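/*
 * Editorial worked example: with HZ = 1000 and 8 cpus online the cap
 * becomes 1000 * 8 / 10 = 800 jiffies (0.8s); a 128-cpu box allows up
 * to 12.8s between balance passes at the widest domains, trading
 * staleness for less cross-node traffic.
 */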
  3815. /*
  3816. * It checks each scheduling domain to see if it is due to be balanced,
  3817. * and initiates a balancing operation if so.
  3818. *
  3819. * Balancing parameters are set up in arch_init_sched_domains.
  3820. */
  3821. static void rebalance_domains(int cpu, enum cpu_idle_type idle)
  3822. {
  3823. int balance = 1;
  3824. struct rq *rq = cpu_rq(cpu);
  3825. unsigned long interval;
  3826. struct sched_domain *sd;
  3827. /* Earliest time when we have to do rebalance again */
  3828. unsigned long next_balance = jiffies + 60*HZ;
  3829. int update_next_balance = 0;
  3830. int need_serialize;
  3831. update_shares(cpu);
  3832. rcu_read_lock();
  3833. for_each_domain(cpu, sd) {
  3834. if (!(sd->flags & SD_LOAD_BALANCE))
  3835. continue;
  3836. interval = sd->balance_interval;
  3837. if (idle != CPU_IDLE)
  3838. interval *= sd->busy_factor;
  3839. /* scale ms to jiffies */
  3840. interval = msecs_to_jiffies(interval);
  3841. interval = clamp(interval, 1UL, max_load_balance_interval);
  3842. need_serialize = sd->flags & SD_SERIALIZE;
  3843. if (need_serialize) {
  3844. if (!spin_trylock(&balancing))
  3845. goto out;
  3846. }
  3847. if (time_after_eq(jiffies, sd->last_balance + interval)) {
  3848. if (load_balance(cpu, rq, sd, idle, &balance)) {
  3849. /*
3850. * We've pulled tasks over, so we're no
3851. * longer idle.
  3852. */
  3853. idle = CPU_NOT_IDLE;
  3854. }
  3855. sd->last_balance = jiffies;
  3856. }
  3857. if (need_serialize)
  3858. spin_unlock(&balancing);
  3859. out:
  3860. if (time_after(next_balance, sd->last_balance + interval)) {
  3861. next_balance = sd->last_balance + interval;
  3862. update_next_balance = 1;
  3863. }
  3864. /*
  3865. * Stop the load balance at this level. There is another
  3866. * CPU in our sched group which is doing load balancing more
  3867. * actively.
  3868. */
  3869. if (!balance)
  3870. break;
  3871. }
  3872. rcu_read_unlock();
  3873. /*
  3874. * next_balance will be updated only when there is a need.
3875. * When the cpu is attached to a null domain, for example, it will not be
  3876. * updated.
  3877. */
  3878. if (likely(update_next_balance))
  3879. rq->next_balance = next_balance;
  3880. }
  3881. #ifdef CONFIG_NO_HZ
  3882. /*
3883. * In the CONFIG_NO_HZ case, the idle balance kickee will do the
  3884. * rebalancing for all the cpus for whom scheduler ticks are stopped.
  3885. */
  3886. static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
  3887. {
  3888. struct rq *this_rq = cpu_rq(this_cpu);
  3889. struct rq *rq;
  3890. int balance_cpu;
  3891. if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
  3892. return;
  3893. for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
  3894. if (balance_cpu == this_cpu)
  3895. continue;
  3896. /*
  3897. * If this cpu gets work to do, stop the load balancing
  3898. * work being done for other cpus. Next load
  3899. * balancing owner will pick it up.
  3900. */
  3901. if (need_resched()) {
  3902. this_rq->nohz_balance_kick = 0;
  3903. break;
  3904. }
  3905. raw_spin_lock_irq(&this_rq->lock);
  3906. update_rq_clock(this_rq);
  3907. update_cpu_load(this_rq);
  3908. raw_spin_unlock_irq(&this_rq->lock);
  3909. rebalance_domains(balance_cpu, CPU_IDLE);
  3910. rq = cpu_rq(balance_cpu);
  3911. if (time_after(this_rq->next_balance, rq->next_balance))
  3912. this_rq->next_balance = rq->next_balance;
  3913. }
  3914. nohz.next_balance = this_rq->next_balance;
  3915. this_rq->nohz_balance_kick = 0;
  3916. }
  3917. /*
  3918. * Current heuristic for kicking the idle load balancer
3919. * - first_pick_cpu is one of the busy CPUs. It will kick
  3920. * idle load balancer when it has more than one process active. This
  3921. * eliminates the need for idle load balancing altogether when we have
  3922. * only one running process in the system (common case).
3923. * - If there is more than one busy CPU, the idle load balancer may have
  3924. * to run for active_load_balance to happen (i.e., two busy CPUs are
  3925. * SMT or core siblings and can run better if they move to different
  3926. * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
  3927. * which will kick idle load balancer as soon as it has any load.
  3928. */
  3929. static inline int nohz_kick_needed(struct rq *rq, int cpu)
  3930. {
  3931. unsigned long now = jiffies;
  3932. int ret;
  3933. int first_pick_cpu, second_pick_cpu;
  3934. if (time_before(now, nohz.next_balance))
  3935. return 0;
  3936. if (rq->idle_at_tick)
  3937. return 0;
  3938. first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
  3939. second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
  3940. if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
  3941. second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
  3942. return 0;
  3943. ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
  3944. if (ret == nr_cpu_ids || ret == cpu) {
  3945. atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
  3946. if (rq->nr_running > 1)
  3947. return 1;
  3948. } else {
  3949. ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
  3950. if (ret == nr_cpu_ids || ret == cpu) {
  3951. if (rq->nr_running)
  3952. return 1;
  3953. }
  3954. }
  3955. return 0;
  3956. }
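/*
 * Editorial note: the cmpxchg dance above hands out at most two
 * "kicker" slots without taking any lock. A busy cpu first tries to
 * claim first_pick_cpu (cmpxchg from nr_cpu_ids); the claimant kicks
 * only when it has more than one runnable task, since a lone task
 * needs no idle balancing. A second, different busy cpu can then claim
 * second_pick_cpu and kicks on any load at all, covering the case
 * where two busy siblings should be spread onto idle packages.
 */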
  3957. #else
  3958. static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
  3959. #endif
  3960. /*
  3961. * run_rebalance_domains is triggered when needed from the scheduler tick.
3962. * Also triggered for nohz idle balancing (with nohz_balance_kick set).
  3963. */
  3964. static void run_rebalance_domains(struct softirq_action *h)
  3965. {
  3966. int this_cpu = smp_processor_id();
  3967. struct rq *this_rq = cpu_rq(this_cpu);
  3968. enum cpu_idle_type idle = this_rq->idle_at_tick ?
  3969. CPU_IDLE : CPU_NOT_IDLE;
  3970. rebalance_domains(this_cpu, idle);
  3971. /*
  3972. * If this cpu has a pending nohz_balance_kick, then do the
  3973. * balancing on behalf of the other idle cpus whose ticks are
  3974. * stopped.
  3975. */
  3976. nohz_idle_balance(this_cpu, idle);
  3977. }
  3978. static inline int on_null_domain(int cpu)
  3979. {
  3980. return !rcu_dereference_sched(cpu_rq(cpu)->sd);
  3981. }
  3982. /*
  3983. * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  3984. */
  3985. static inline void trigger_load_balance(struct rq *rq, int cpu)
  3986. {
  3987. /* Don't need to rebalance while attached to NULL domain */
  3988. if (time_after_eq(jiffies, rq->next_balance) &&
  3989. likely(!on_null_domain(cpu)))
  3990. raise_softirq(SCHED_SOFTIRQ);
  3991. #ifdef CONFIG_NO_HZ
  3992. else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
  3993. nohz_balancer_kick(cpu);
  3994. #endif
  3995. }
  3996. static void rq_online_fair(struct rq *rq)
  3997. {
  3998. update_sysctl();
  3999. }
  4000. static void rq_offline_fair(struct rq *rq)
  4001. {
  4002. update_sysctl();
  4003. }
  4004. #else /* CONFIG_SMP */
  4005. /*
  4006. * on UP we do not need to balance between CPUs:
  4007. */
  4008. static inline void idle_balance(int cpu, struct rq *rq)
  4009. {
  4010. }
  4011. #endif /* CONFIG_SMP */
  4012. /*
  4013. * scheduler tick hitting a task of our scheduling class:
  4014. */
  4015. static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
  4016. {
  4017. struct cfs_rq *cfs_rq;
  4018. struct sched_entity *se = &curr->se;
  4019. for_each_sched_entity(se) {
  4020. cfs_rq = cfs_rq_of(se);
  4021. entity_tick(cfs_rq, se, queued);
  4022. }
  4023. }
  4024. /*
  4025. * called on fork with the child task as argument from the parent's context
  4026. * - child not yet on the tasklist
  4027. * - preemption disabled
  4028. */
  4029. static void task_fork_fair(struct task_struct *p)
  4030. {
  4031. struct cfs_rq *cfs_rq = task_cfs_rq(current);
  4032. struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
  4033. int this_cpu = smp_processor_id();
  4034. struct rq *rq = this_rq();
  4035. unsigned long flags;
  4036. raw_spin_lock_irqsave(&rq->lock, flags);
  4037. update_rq_clock(rq);
  4038. if (unlikely(task_cpu(p) != this_cpu)) {
  4039. rcu_read_lock();
  4040. __set_task_cpu(p, this_cpu);
  4041. rcu_read_unlock();
  4042. }
  4043. update_curr(cfs_rq);
  4044. if (curr)
  4045. se->vruntime = curr->vruntime;
  4046. place_entity(cfs_rq, se, 1);
  4047. if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
  4048. /*
  4049. * Upon rescheduling, sched_class::put_prev_task() will place
  4050. * 'current' within the tree based on its new key value.
  4051. */
  4052. swap(curr->vruntime, se->vruntime);
  4053. resched_task(rq->curr);
  4054. }
  4055. se->vruntime -= cfs_rq->min_vruntime;
  4056. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4057. }
  4058. /*
  4059. * Priority of the task has changed. Check to see if we preempt
  4060. * the current task.
  4061. */
  4062. static void
  4063. prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
  4064. {
  4065. if (!p->se.on_rq)
  4066. return;
  4067. /*
  4068. * Reschedule if we are currently running on this runqueue and
  4069. * our priority decreased, or if we are not currently running on
  4070. * this runqueue and our priority is higher than the current's
  4071. */
  4072. if (rq->curr == p) {
  4073. if (p->prio > oldprio)
  4074. resched_task(rq->curr);
  4075. } else
  4076. check_preempt_curr(rq, p, 0);
  4077. }

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it's
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
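
/*
 * Note: with CFS bandwidth control, charging zero runtime via
 * account_cfs_rq_runtime(cfs_rq, 0) consumes no quota, but it does make
 * sure the cfs_rq has runtime assigned from the group's pool before the
 * entity starts executing there.
 */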

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
#endif
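
/*
 * get_rr_interval_fair() backs sched_rr_get_interval(2) for SCHED_OTHER
 * tasks: the reported "timeslice" is the sched_slice() the task would
 * currently receive, converted to jiffies. A hedged userspace sketch,
 * illustrative only and not part of this file; passing pid 0 queries the
 * calling thread:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("slice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 */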
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};
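
/*
 * Note: .next chains the scheduling classes in priority order; fair links
 * to idle, so when CFS has nothing runnable the core scheduler's
 * pick_next_task() loop falls through to the idle class.
 */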

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif